import ctypes
import os
import sys
import logging
from . import timer
from .exit import has_exit_hooks, invoke_exit_callbacks
from .globs import (DEFAULT_BASIC_BLOCK_LIMIT,
DEFAULT_FUZZ_CONSUMPTION_TIMEOUT, DEFAULT_MAX_INTERRUPTS,
FUZZ_MODES, TRIGGER_MODES)
from .mmio_models.wrapper import mmio_access_handler_wrapper_hook
logger = logging.getLogger("emulator")
""" native.py
Wrapper around the native library API functions.
"""
native_lib = None
mmio_cb_wrapper = None
timer_cb_wrapper = None
timer_cb_user_data = None
# just like unicorn does, we need to keep references to the ctypes callback objects (so they are not garbage collected)
obj_refs = []
uc_engine = ctypes.c_void_p
# Prototyping code taken from unicorn python bindings
def _load_lib(path):
try:
lib_file = os.path.join(path)
dll = ctypes.cdll.LoadLibrary(lib_file)
return dll
except OSError as e:
logger.error(f'FAILED to load {lib_file} {e}')
return None
# set up all the function prototypes
def _setup_prototype(lib, fname, restype, *argtypes):
getattr(lib, fname).restype = restype
getattr(lib, fname).argtypes = argtypes
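# For example (illustrative; it simply mirrors the prototype declarations made in
# init() below), the following call makes native_lib.fuzz_remaining() return a
# Python int:
#   _setup_prototype(native_lib, "fuzz_remaining", ctypes.c_int)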
EXIT_CB = ctypes.CFUNCTYPE(
None, ctypes.c_int, ctypes.c_int
)
UC_HOOK_CODE_CB = ctypes.CFUNCTYPE(
None, uc_engine, ctypes.c_uint64, ctypes.c_size_t, ctypes.c_void_p
)
UC_HOOK_MEM_ACCESS_CB = ctypes.CFUNCTYPE(
None, uc_engine, ctypes.c_int,
ctypes.c_uint64, ctypes.c_int, ctypes.c_int64, ctypes.c_void_p
)
UC_HOOK_INTR_CB = ctypes.CFUNCTYPE(
None, uc_engine, ctypes.c_uint32, ctypes.c_void_p
)
mmio_user_data = None
def add_mmio_region(uc, start, end):
global mmio_user_data
if mmio_user_data is None:
mmio_user_data = ctypes.cast(uc._callback_count, ctypes.c_void_p)
assert native_lib.add_mmio_region(uc._uch, start, end, mmio_user_data)==0
def load_fuzz(file_path):
assert native_lib.load_fuzz(file_path.encode())==0
sys.stdout.flush()
def emulate(uc, fuzz_file_path, prefix_input_file_path=None):
# uc_err emulate(uc_engine *uc, char *input_path, uint64_t instr_limit, char *prefix_input_path);
if prefix_input_file_path:
prefix_input_file_path = prefix_input_file_path.encode()
else:
# In case input path is an empty string, set it to None explicitly
prefix_input_file_path = None
native_lib.emulate(uc._uch, fuzz_file_path.encode(), prefix_input_file_path)
def get_fuzz(uc, size):
ptr = (ctypes.c_char * size).from_address(native_lib.get_fuzz_ptr(uc, size))
return ptr.raw
def fuzz_consumed():
return native_lib.fuzz_consumed()
def fuzz_remaining():
return native_lib.fuzz_remaining()
def get_latest_mmio_fuzz_access_size():
return native_lib.get_latest_mmio_fuzz_access_size()
def get_latest_mmio_fuzz_access_index():
return native_lib.get_latest_mmio_fuzz_access_index()
def register_cond_py_handler_hook(uc, handler_locs):
if not handler_locs:
logger.warning("no function handler hooks registered, skipping registration")
return
arr = (ctypes.c_int64 * len(handler_locs))(*handler_locs)
# hack: In order to keep a uc reference around for the high level callback,
# we sneak an additional callback into the uc object (as done in unicorn.py)
from .user_hooks import func_hook_handler
callback = func_hook_handler
uc._callback_count += 1
uc._callbacks[uc._callback_count] = (callback, None)
cb = ctypes.cast(UC_HOOK_CODE_CB(uc._hookcode_cb), UC_HOOK_CODE_CB)
user_data = ctypes.cast(uc._callback_count, ctypes.c_void_p)
assert native_lib.register_cond_py_handler_hook(
uc._uch, cb, arr, len(arr), user_data
) == 0
obj_refs.append(cb)
def remove_function_handler_hook_address(uc, address):
assert native_lib.remove_function_handler_hook_address(uc._uch, address) == 0
def _create_and_inject_c_callable_mem_hook(uc, py_fn):
# hack: In order to keep a uc reference around for the high level callback,
# we sneak an additional callback into the uc object (as done in unicorn.py)
callback = py_fn
uc._callback_count += 1
uc._callbacks[uc._callback_count] = (callback, None)
cb = ctypes.cast(UC_HOOK_MEM_ACCESS_CB(uc._hook_mem_access_cb), UC_HOOK_MEM_ACCESS_CB)
user_data = ctypes.cast(uc._callback_count, ctypes.c_void_p)
obj_refs.append(cb)
return cb, user_data
def _create_and_inject_c_callable_central_timer_hook(uc, py_fn):
callback = py_fn
# hack: In order to keep a uc reference around for the high level callback,
# we sneak an additional callback into the uc object (as done in unicorn.py)
# even bigger hack: we re-use the interrupt callback prototype, since its signature
# matches the one we need, to create an alternative callback
# from: cb(self, intno, data)
# to  : cb(self, timer_id, data)
uc._callback_count += 1
uc._callbacks[uc._callback_count] = (callback, None)
cb = ctypes.cast(UC_HOOK_INTR_CB(uc._hook_intr_cb), UC_HOOK_INTR_CB)
user_data = ctypes.cast(uc._callback_count, ctypes.c_void_p)
obj_refs.append(cb)
return cb, user_data
def register_py_handled_mmio_ranges(uc, python_handled_range_starts, python_handled_range_ends):
global mmio_cb_wrapper
assert mmio_cb_wrapper is not None
assert len(python_handled_range_starts) == len(python_handled_range_ends)
starts_arr = (ctypes.c_int64 * len(python_handled_range_starts))(*python_handled_range_starts)
ends_arr = (ctypes.c_int64 * len(python_handled_range_ends))(*python_handled_range_ends)
assert native_lib.register_py_handled_mmio_ranges(uc._uch, mmio_cb_wrapper, starts_arr, ends_arr, len(python_handled_range_ends)) == 0
def register_linear_mmio_models(uc, starts, ends, pcs, init_vals, steps):
assert len(starts) == len(ends) == len(init_vals) == len(steps)
starts_arr = (ctypes.c_int64 * len(starts))(*starts)
ends_arr = (ctypes.c_int64 * len(ends))(*ends)
init_vals_arr = (ctypes.c_int32 * len(init_vals))(*init_vals)
steps_arr = (ctypes.c_int32 * len(steps))(*steps)
pcs_arr = (ctypes.c_int32 * len(pcs))(*pcs)
assert native_lib.register_linear_mmio_models(uc._uch, starts_arr, ends_arr, pcs_arr, init_vals_arr, steps_arr, len(starts)) == 0
def register_constant_mmio_models(uc, starts, ends, pcs, vals):
assert len(starts) == len(ends) == len(vals)==len(pcs)
starts_arr = (ctypes.c_int64 * len(starts))(*starts)
ends_arr = (ctypes.c_int64 * len(ends))(*ends)
vals_arr = (ctypes.c_int32 * len(vals))(*vals)
pcs_arr = (ctypes.c_int32 * len(pcs))(*pcs)
assert native_lib.register_constant_mmio_models(uc._uch, starts_arr, ends_arr, pcs_arr, vals_arr, len(starts)) == 0
def register_bitextract_mmio_models(uc, starts, ends, pcs, byte_sizes, left_shifts, masks):
assert len(starts) == len(ends) == len(byte_sizes) == len(left_shifts) == len(pcs)
starts_arr = (ctypes.c_int64 * len(starts))(*starts)
ends_arr = (ctypes.c_int64 * len(ends))(*ends)
byte_sizes_arr = (ctypes.c_int8 * len(byte_sizes))(*byte_sizes)
left_shifts_arr = (ctypes.c_int8 * len(left_shifts))(*left_shifts)
masks_arr = (ctypes.c_int32 * len(masks))(*masks)
pcs_arr = (ctypes.c_int32 * len(pcs))(*pcs)
assert native_lib.register_bitextract_mmio_models(uc._uch, starts_arr, ends_arr, pcs_arr, byte_sizes_arr, left_shifts_arr, masks_arr, len(starts)) == 0
def register_value_set_mmio_models(uc, starts, ends, pcs, value_sets):
assert len(starts) == len(ends) == len(value_sets) == len(pcs)
starts_arr = (ctypes.c_int64 * len(starts))(*starts)
ends_arr = (ctypes.c_int64 * len(ends))(*ends)
pcs_arr = (ctypes.c_int32 * len(pcs))(*pcs)
value_nums_arr = (ctypes.c_int32 * len(value_sets))(*[len(value_set) for value_set in value_sets])
value_set_arrs = [(ctypes.c_int32 * len(value_set))(*value_set) for value_set in value_sets]
value_sets_arr_ptrs = (ctypes.POINTER(ctypes.c_ulong) * len(value_set_arrs))(*[ctypes.cast(value_set_arr, ctypes.POINTER(ctypes.c_ulong)) for value_set_arr in value_set_arrs])
assert native_lib.register_value_set_mmio_models(uc._uch, starts_arr, ends_arr, pcs_arr, value_nums_arr, value_sets_arr_ptrs, len(starts)) == 0
def set_ignored_mmio_addresses(addresses, pcs):
addrs_arr = (ctypes.c_int64 * len(addresses))(*addresses)
pcs_arr = (ctypes.c_uint32 * len(pcs))(*pcs)
assert native_lib.set_ignored_mmio_addresses(
addrs_arr, pcs_arr, len(addrs_arr)
) == 0
def init_nvic(uc, vtor, num_vecs, interrupt_limit=DEFAULT_MAX_INTERRUPTS, disabled_interrupts=()):
global native_lib
logger.debug("Calling init_nvic with vtor=0x{:08x}, num_vecs: {}".format(vtor, num_vecs))
disabled_interrupts_arr = (ctypes.c_int32 * len(disabled_interrupts))(*disabled_interrupts)
assert native_lib.init_nvic(uc._uch, vtor, num_vecs, interrupt_limit, len(disabled_interrupts), disabled_interrupts_arr) == 0
def init_native_tracing(uc, bbl_set_trace_path, bbl_hash_path, mmio_set_trace_path, mmio_ranges):
global native_lib
mmio_region_starts, mmio_region_ends = zip(*mmio_ranges)
mmio_region_starts_arr = (ctypes.c_uint64 * len(mmio_region_starts))(*mmio_region_starts)
mmio_region_ends_arr = (ctypes.c_uint64 * len(mmio_region_ends))(*mmio_region_ends)
if not bbl_set_trace_path:
bbl_set_trace_path = None
else:
bbl_set_trace_path = bbl_set_trace_path.encode()
if not mmio_set_trace_path:
mmio_set_trace_path = None
else:
mmio_set_trace_path = mmio_set_trace_path.encode()
if not bbl_hash_path:
bbl_hash_path = None
else:
bbl_hash_path = bbl_hash_path.encode()
assert(native_lib.init_tracing(uc._uch, bbl_set_trace_path, bbl_hash_path, mmio_set_trace_path, len(mmio_ranges), mmio_region_starts_arr, mmio_region_ends_arr) == 0)
def nvic_set_pending(vec_num):
global native_lib
native_lib.nvic_set_pending(vec_num)
def init_timer_hook(uc, global_timer_scale):
global native_lib
global timer_cb_user_data
global timer_cb_wrapper
cb, user_data = _create_and_inject_c_callable_central_timer_hook(uc, timer.central_timer_hook)
timer_cb_wrapper = cb
timer_cb_user_data = user_data
assert native_lib.init_timer_hook(uc._uch, global_timer_scale) == 0
def init_systick(uc, reload_val):
global native_lib
assert native_lib.init_systick(uc._uch, reload_val) == 0
IRQ_NOT_USED=0xffffffff
def add_timer(reload_val, callback=None, isr_num=IRQ_NOT_USED):
global timer_cb_wrapper
global timer_cb_user_data
global native_lib
assert timer_cb_wrapper is not None and timer_cb_user_data is not None
# While technically allowed in the C code, invoking a callback and pending an interrupt at the same time is not something we want to support
assert not (callback is not None and isr_num != IRQ_NOT_USED)
passed_cb = timer_cb_wrapper if callback is not None else 0
return native_lib.add_timer(reload_val, passed_cb, timer_cb_user_data, isr_num)
def is_running(timer_id):
return native_lib.is_running(timer_id)
def get_global_ticker():
global native_lib
return native_lib.get_global_ticker()
def rem_timer(uc, timer_id):
global native_lib
assert native_lib.rem_timer(uc, timer_id) == 0
def reload_timer(timer_id):
global native_lib
assert native_lib.reload_timer(timer_id) == 0
def start_timer(uc, timer_id):
global native_lib
assert native_lib.start_timer(uc, timer_id) == 0
def stop_timer(uc, timer_id):
global native_lib
assert native_lib.stop_timer(uc, timer_id) == 0
# uc_hook add_interrupt_trigger(uc_engine *uc, uint64_t addr, uint32_t irq, uint32_t num_skips, uint32_t num_pends, uint32_t do_fuzz);
def add_interrupt_trigger(uc, addr, irq, num_skips, num_pends, fuzz_mode, trigger_mode, every_nth_tick):
assert fuzz_mode < len(FUZZ_MODES) and trigger_mode < len(TRIGGER_MODES)
assert native_lib.add_interrupt_trigger(uc._uch, addr, irq, num_skips, num_pends, fuzz_mode, trigger_mode, every_nth_tick) == 0
def register_native_debug_hooks(uc):
assert(native_lib.add_debug_hooks(uc._uch) == 0)
def load_native_lib(native_lib_path):
global native_lib
native_lib = _load_lib(native_lib_path)
assert native_lib is not None
def do_exit(uc, status, sig=-1):
global native_lib
native_lib.do_exit(uc, status, sig)
def init(uc, mmio_regions, exit_at_bbls, exit_at_hit_num, do_print_exit_info, fuzz_consumption_timeout=DEFAULT_FUZZ_CONSUMPTION_TIMEOUT, instr_limit=DEFAULT_BASIC_BLOCK_LIMIT):
global native_lib
global mmio_cb_wrapper
# GENERAL
# uc_err init( uc_engine *uc, exit_hook_t p_exit_hook, int p_num_mmio_regions, uint64_t *p_mmio_starts, uint64_t *p_mmio_ends, void *p_py_default_mmio_user_data, uint32_t num_exit_at_bbls, uint64_t *exit_at_bbls, uint32_t exit_at_hit_num, int p_do_print_exit_info, uint64_t fuzz_consumption_timeout, uint64_t p_instr_limit);
_setup_prototype(native_lib, "init", ctypes.c_int, uc_engine, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint, ctypes.c_void_p, ctypes.c_uint, ctypes.c_uint, ctypes.c_uint64, ctypes.c_uint64)
# uc_err register_cond_py_handler_hook(uc_cb_hookcode_t py_callback, uint64_t *addrs, int num_addrs)
_setup_prototype(native_lib, "register_cond_py_handler_hook", ctypes.c_int, uc_engine, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int)
# uc_err remove_function_handler_hook_address(uc_engine * uc, uint64_t address);
_setup_prototype(native_lib, "remove_function_handler_hook_address", ctypes.c_int, uc_engine, ctypes.c_uint64)
# void do_exit(uc_engine *uc, int status, int sig);
_setup_prototype(native_lib, "do_exit", ctypes.c_int, uc_engine, ctypes.c_int, ctypes.c_int)
# FUZZING
_setup_prototype(native_lib, "load_fuzz", ctypes.c_int, ctypes.c_char_p)
# uint32_t fuzz_remaining();
_setup_prototype(native_lib, "fuzz_remaining", ctypes.c_int)
# uint64_t num_consumed_fuzz();
_setup_prototype(native_lib, "fuzz_consumed", ctypes.c_uint32)
# uint32_t get_latest_mmio_fuzz_access_size();
_setup_prototype(native_lib, "get_latest_mmio_fuzz_access_size", ctypes.c_uint32)
# uint32_t get_latest_mmio_fuzz_access_index();
_setup_prototype(native_lib, "get_latest_mmio_fuzz_access_index", ctypes.c_uint32)
# char *get_fuzz_ptr(uc_engine *uc, uint32_t size);
_setup_prototype(native_lib, "get_fuzz_ptr", ctypes.c_void_p, uc_engine, ctypes.c_uint32)
# uc_err add_mmio_region(uc_engine *uc, uint64_t begin, uint64_t end)
_setup_prototype(native_lib, "add_mmio_region", ctypes.c_int, uc_engine, ctypes.c_uint64, ctypes.c_uint64, ctypes.c_void_p)
# extern uc_err register_py_handled_mmio_ranges(uc_engine *uc, uc_cb_hookmem_t py_callback, uint64_t *starts, uint64_t *ends, int num_ranges);
_setup_prototype(native_lib, "register_py_handled_mmio_ranges", ctypes.c_int, uc_engine, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int)
# extern uc_err set_ignored_mmio_addresses(uint64_t *addresses, uint32_t *pcs, int num_addresses);
_setup_prototype(native_lib, "set_ignored_mmio_addresses", ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int)
# extern uc_err register_linear_mmio_models(uc_engine *uc, uint64_t *starts, uint64_t *ends, uint32_t *pcs, uint32_t *init_vals, uint32_t *steps, int num_ranges);
_setup_prototype(native_lib, "register_linear_mmio_models", ctypes.c_int, uc_engine, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int)
# extern uc_err register_constant_mmio_models(uc_engine *uc, uint64_t *starts, uint64_t *ends, uint32_t *pcs, uint32_t *vals, int num_ranges)
_setup_prototype(native_lib, "register_constant_mmio_models", ctypes.c_int, uc_engine, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int)
# extern uc_err register_bitextract_mmio_models(uc_engine *uc, uint64_t *starts, uint64_t *ends, uint32_t *pcs, uint8_t *byte_sizes, uint8_t *left_shifts, uint32_t * masks, int num_ranges);
_setup_prototype(native_lib, "register_bitextract_mmio_models", ctypes.c_int, uc_engine, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int)
# extern
# if a single order was given, y, yfit and found_rv need to be wrapped in lists of themselves
y, yfit = [y], [yfit]
found_rv = [found_rv]
# order gen is just set to a list containing the first
# (and only) element
order_gen = [0]
# else if order is None we get all orders
elif order is None:
order_gen = plotter.plotloop(orders)
# prompt to start looper
plotter.close_plots(loop=True)
# else we just deal with the order specified
else:
order_gen = [order]
# ------------------------------------------------------------------
# deal with plot style
if 'dark' in params['DRS_PLOT_STYLE']:
black = 'white'
else:
black = 'black'
# ------------------------------------------------------------------
# loop around orders
for order_num in order_gen:
# get this order's values
y_i = y[order_num]
yfit_i = yfit[order_num]
# work out the residuals
res = y_i - yfit_i
# ------------------------------------------------------------------
# set up plot
gs = dict(height_ratios=[2, 1])
fig, frames = graph.set_figure(plotter, nrows=2, ncols=1, sharex=True,
gridspec_kw=gs)
# ------------------------------------------------------------------
# plot x vs y and yfit
frames[0].plot(x, y_i, label='data', marker='x', ls='None',
color=black)
frames[0].plot(x, yfit_i, label='fit')
# plot residuals
frames[1].plot(x, res, label='residuals')
# plot legends
frames[0].legend(loc=0)
frames[1].legend(loc=0)
# set labels and title
targs = ['({0})'.format(kind), found_rv[order_num], ccf_mask]
title = 'CCF plot {0}\n RV={1:.5f} km/s Mask={2}'.format(*targs)
if orders is not None:
title = 'RV Fit plot. Order {0}'.format(order_num)
frames[0].set(ylabel='CCF', title=title)
frames[1].set(xlabel='RV [km/s]', ylabel='CCF')
# ------------------------------------------------------------------
# adjust size
fig.subplots_adjust(hspace=0, wspace=0)
# ------------------------------------------------------------------
# update filename (adding order_num to end)
suffix = 'order{0}'.format(order_num)
graph.set_filename(plotter.params, plotter.location, suffix=suffix)
# ------------------------------------------------------------------
# wrap up using plotter
plotter.plotend(graph)
def plot_ccf_swave_ref(plotter, graph, kwargs):
# ------------------------------------------------------------------
# start the plotting process
if not plotter.plotstart(graph):
return
# ------------------------------------------------------------------
# get the arguments from kwargs
wavemap = kwargs['wavemap']
image = kwargs['image']
fiber = kwargs['fiber']
nbo = kwargs['nbo']
# optional arguments
order = kwargs.get('order', None)
orders = kwargs.get('orders', None)
# ------------------------------------------------------------------
if order is None and orders is None:
order_gen = plotter.plotloop(np.arange(nbo))
# prompt to start looper
plotter.close_plots(loop=True)
# else we check whether orders is set
elif orders is not None:
order_gen = list(orders)
# else we just deal with the order specified
elif order is not None:
order_gen = [order]
else:
order_gen = [0]
# ------------------------------------------------------------------
# loop around orders
for order_num in order_gen:
# ------------------------------------------------------------------
# set up plot
fig, frame = graph.set_figure(plotter)
# ------------------------------------------------------------------
# plot fits
frame.plot(wavemap[order_num], image[order_num])
# set title labels limits
title = 'spectral order {0} fiber {1}'
frame.set(xlabel='Wavelength [nm]', ylabel='flux',
title=title.format(order_num, fiber))
# ------------------------------------------------------------------
# update filename (adding order_num to end)
suffix = 'order{0}_{1}'.format(order_num, fiber)
graph.set_filename(plotter.params, plotter.location, suffix=suffix)
# ------------------------------------------------------------------
# wrap up using plotter
plotter.plotend(graph)
def plot_ccf_photon_uncert(plotter, graph, kwargs):
# ------------------------------------------------------------------
# start the plotting process
if not plotter.plotstart(graph):
return
# ------------------------------------------------------------------
# get the arguments from kwargs
x = kwargs.get('x')
y_sp = kwargs.get('y_sp')
y_ccf = kwargs.get('y_cc')
# get max/min points
with warnings.catch_warnings(record=True) as _:
ymin = mp.nanmin(y_ccf)
ymax = mp.nanmax(y_ccf)
if not np.isfinite(ymin):
ymin = mp.nanmin(y_sp)
if not np.isfinite(ymax):
ymax = mp.nanmax(y_sp)
# ------------------------------------------------------------------
# set up plot
fig, frame = graph.set_figure(plotter)
# ------------------------------------------------------------------
# plot fits
frame.plot(x, y_sp, label='DVRMS Spectrum', marker='x', linestyle='None')
# plot ccf noise (unless all NaNs)
if np.sum(np.isnan(y_ccf)) != len(y_ccf):
frame.plot(x, y_ccf, label='DVRMS CCF', marker='o', linestyle='None')
# set title labels limits
title = 'Photon noise uncertainty versus spectral order'
frame.set(xlabel='Order number', ylabel='Photon noise uncertainty [m/s]',
title=title)
# deal with limits (may be NaN)
if np.isfinite(ymin) and np.isfinite(ymax):
frame.set_ylim(bottom=ymin, top=ymax)
elif np.isfinite(ymin):
frame.set_ylim(bottom=ymin)
elif np.isfinite(ymax):
frame.set_ylim(top=ymax)
frame.legend(loc=0)
# ------------------------------------------------------------------
# wrap up using plotter
plotter.plotend(graph)
ccf_rv_fit_loop = Graph('CCF_RV_FIT_LOOP', kind='debug', func=plot_ccf_rv_fit)
ccf_rv_fit = Graph('CCF_RV_FIT', kind='debug', func=plot_ccf_rv_fit)
ccf_swave_ref = Graph('CCF_SWAVE_REF', kind='debug', func=plot_ccf_swave_ref)
ccf_photon_uncert = Graph('CCF_PHOTON_UNCERT', kind='debug',
func=plot_ccf_photon_uncert)
sum_ccf_rv_fit = Graph('SUM_CCF_RV_FIT', kind='summary', func=plot_ccf_rv_fit)
sum_ccf_photon_uncert = Graph('SUM_CCF_PHOTON_UNCERT', kind='summary',
func=plot_ccf_photon_uncert)
# add to definitions
definitions += [ccf_rv_fit, ccf_rv_fit_loop, ccf_swave_ref,
ccf_photon_uncert, sum_ccf_rv_fit, sum_ccf_photon_uncert]
# =============================================================================
# Define polarisation plotting functions
# =============================================================================
def plot_polar_continuum(plotter, graph, kwargs):
# ------------------------------------------------------------------
# start the plotting process
if not plotter.plotstart(graph):
return
# ------------------------------------------------------------------
# get the arguments from kwargs
props = kwargs['props']
# get data from props
wl = props['FLAT_X']
pol = 100 * props['FLAT_POL']
contpol = 100.0 * props['CONT_POL']
contxbin = np.array(props['CONT_XBIN'])
contybin = 100. * np.array(props['CONT_YBIN'])
stokes = props['STOKES']
method = props['METHOD']
nexp = props['NEXPOSURES']
# ------------------------------------------------------------------
# set up plot
fig, frame = graph.set_figure(plotter, nrows=1, ncols=1)
# ------------------------------------------------------------------
# set up title
title = 'Polarimetry: Stokes {0}, Method={1}, for {2} exposures'
titleargs = [stokes, method, nexp]
# ------------------------------------------------------------------
# plot polarimetry data
frame.plot(wl, pol, linestyle='None', marker='.',
label='Degree of Polarization')
# plot continuum sample points
frame.plot(contxbin, contybin, linestyle='None', marker='o',
label='Continuum Sampling')
# plot continuum fit
frame.plot(wl, contpol, label='Continuum Polarization')
# ---------------------------------------------------------------------
# set title and labels
xlabel = 'wavelength [nm]'
ylabel = 'Degree of polarization for Stokes {0} [%]'.format(stokes)
frame.set(title=title.format(*titleargs), xlabel=xlabel, ylabel=ylabel)
# ---------------------------------------------------------------------
# plot legend
frame.legend(loc=0)
# ------------------------------------------------------------------
# wrap up using plotter
plotter.plotend(graph)
def plot_polar_results(plotter, graph, kwargs):
# ------------------------------------------------------------------
# start the plotting process
if not plotter.plotstart(graph):
return
# ------------------------------------------------------------------
# get the arguments from kwargs
props = kwargs['props']
# get data from props
wl = props['FLAT_X']
pol = 100 * props['FLAT_POL']
null1 = 100.0 * props['FLAT_NULL1']
null2 = 100.0 * props['FLAT_NULL2']
stokes = props['STOKES']
method = props['METHOD']
nexp = props['NEXPOSURES']
# ------------------------------------------------------------------
# set up plot
fig, frame = graph.set_figure(plotter, nrows=1, ncols=1)
# ------------------------------------------------------------------
# set up title
title = 'Polarimetry: Stokes {0}, Method={1}, for {2} exposures'
titleargs = [stokes, method, nexp]
# ---------------------------------------------------------------------
# plot polarimetry data
frame.plot(wl, pol, label='Degree of Polarization')
# plot null1 data
frame.plot(wl, null1, label='Null Polarization 1')
# plot null2 data
frame.plot(wl, null2, label='Null Polarization 2')
# ---------------------------------------------------------------------
# set title and labels
xlabel = 'wavelength [nm]'
ylabel = 'Degree of polarization for Stokes {0} [%]'.format(stokes)
frame.set(title=title.format(*titleargs), xlabel=xlabel, ylabel=ylabel)
# ---------------------------------------------------------------------
# plot legend
frame.legend(loc=0)
# ------------------------------------------------------------------
# wrap up using plotter
plotter.plotend(graph)
def plot_polar_stoke_i(plotter, graph, kwargs):
# ------------------------------------------------------------------
# start the plotting process
if not plotter.plotstart(graph):
return
# ------------------------------------------------------------------
# get the arguments from kwargs
props = kwargs['props']
# get data from props
wl = props['FLAT_X']
stokes_i = props['FLAT_STOKES_I']
stokes_ierr = props['FLAT_STOKES_I_ERR']
stokes = props['STOKES']
method = props['METHOD']
nexp = props['NEXPOSURES']
# ------------------------------------------------------------------
# set up plot
fig, frame = graph.set_figure(plotter, nrows=1, ncols=1)
# ------------------------------------------------------------------
# set up title
title = 'Polarimetry: Stokes {0}, Method={1}, for {2} exposures'
titleargs = [stokes, method, nexp]
# ---------------------------------------------------------------------
# plot polarimetry data
frame.errorbar(wl, stokes_i, yerr=stokes_ierr, fmt='-', label='Stokes I',
alpha=0.5)
# ---------------------------------------------------------------------
# set title and labels
xlabel = 'wavelength [nm]'
ylabel = 'Stokes {0} total flux (ADU)'.format(stokes)
frame.set(title=title.format(*titleargs), xlabel=xlabel, ylabel=ylabel)
# ---------------------------------------------------------------------
# plot legend
frame.legend(loc=0)
# ------------------------------------------------------------------
# wrap up using plotter
plotter.plotend(graph)
def plot_polar_lsd(plotter, graph, kwargs):
# ------------------------------------------------------------------
# start the plotting process
if not plotter.plotstart(graph):
return
# ------------------------------------------------------------------
# get the arguments from kwargs
pprops = kwargs['pprops']
lprops = kwargs['lprops']
# get data from props
vels = lprops['LSD_VELOCITIES']
zz = lprops['LSD_STOKES_I']
zgauss = lprops['LSD_STOKES_I_MODEL']
z_p = lprops['LSD_STOKES_VQU']
z_np = lprops['LSD_NULL']
stokes = pprops['STOKES']
# ------------------------------------------------------------------
# set up plot
fig, frames = graph.set_figure(plotter, nrows=1, ncols=3)
# ------------------------------------------------------------------
frame = frames[0]
frame.plot(vels, zz, '-')
frame.plot(vels, zgauss, '-')
title = 'LSD Analysis'
ylabel = 'Stokes I profile'
xlabel = ''
# set title and labels
frame.set(title=title, xlabel=xlabel, ylabel=ylabel)
# ---------------------------------------------------------------------
frame = frames[1]
title = ''
frame.plot(vels, z_p, '-')
ylabel = 'Stokes {0} profile'.format(stokes)
xlabel = ''
# set title and labels
frame.set(title=title, xlabel=xlabel, ylabel=ylabel)
# ---------------------------------------------------------------------
frame = frames[2]
frame.plot(vels, z_np, '-')
xlabel = 'velocity (km/s)'
ylabel = 'Null profile'
# set title and labels
frame.set(title=title, xlabel=xlabel, ylabel=ylabel)
# ------------------------------------------------------------------
# wrap up using plotter
plotter.plotend(graph)
polar_continuum = Graph('POLAR_CONTINUUM', kind='debug',
func=plot_polar_continuum)
polar_results = Graph('POLAR_RESULTS', kind='debug', func=plot_polar_results)
polar_stokes_i = Graph('POLAR_STOKES_I', kind='debug', func=plot_polar_stoke_i)
polar_lsd = Graph('POLAR_LSD', kind='debug', func=plot_polar_lsd)
# add to definitions
definitions += [polar_continuum, polar_results, polar_stokes_i, polar_lsd]
# =============================================================================
# Define tool functions
# =============================================================================
def plot_logstats_bar(plotter, graph, kwargs):
# ------------------------------------------------------------------
# start the plotting process
if not plotter.plotstart(graph):
return
# ------------------------------------------------------------------
# get the arguments from kwargs
started = kwargs['started']
passed = kwargs['passed']
ended = kwargs['ended']
urecipes = kwargs['urecipes']
# make arrays
x = np.arange(0, len(urecipes))
# ------------------------------------------------------------------
# set up plot
fig, frames = graph.set_figure(plotter, nrows=1, ncols=2)
# ------------------------------------------------------------------
width = 0.3
frames[0].bar(x - width, started, color='b', label='started',
align='center', width=width, zorder=5, alpha=0.875)
frames[0].bar(x, passed, color='r', label='passed QC',
align='center', width=width, zorder=5, alpha=0.875)
# -*- python -*-
from __future__ import division
from professor2.core import *
from professor2.histos import *
from professor2.errors import *
from professor2.sampling import *
def mk_ipolinputs(params):
"""
Make sorted run name and parameter name & value lists, suitable for passing to prof.Ipol
params is a dict (actually, prefer OrderedDict) of run_names -> param_vals,
as returned from read_rundata
"""
runs = sorted(params.keys())
if not runs:
return runs, [], [[]]
paramnames = params[runs[0]].keys()
paramslist = [[params[run][pn] for pn in paramnames] for run in runs]
return runs, paramnames, paramslist
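# Illustrative sketch (not part of the original module): for a two-run dict such as
#   {"run_A": {"p1": 0.1, "p2": 2.0}, "run_B": {"p1": 0.3, "p2": 1.5}}
# mk_ipolinputs returns
#   (["run_A", "run_B"], ["p1", "p2"], [[0.1, 2.0], [0.3, 1.5]])
# assuming an OrderedDict so that the parameter-name order is well defined.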
def mk_ipolbin(rawP, rawV, rawE, xmin, xmax, CFG):
# TODO finally learn how to use kwargs
order = CFG["ORDER"]
errorder = CFG["ERR_ORDER"]
errmode = CFG["ERR_MODE"]
medfilt = CFG["MEDIAN_FILT"]
if medfilt>0:
from numpy import median
# TODO figure out what to do with x=0
relErrs = [rawE[num]/x if x!=0 else 1 for num, x in enumerate(rawV)]
rem = medfilt*median(relErrs)
P, V, E =[], [], []
for num, x in enumerate(relErrs):
if x < rem:
P.append(rawP[num])
V.append(rawV[num])
E.append(rawE[num])
if CFG["DEBUG"]:
print "%i/%i survive median filter %f times %f"%(len(P), len(rawP), medfilt, median(relErrs))
else:
P=rawP
V=rawV
E=rawE
import professor2 as prof
pmin = prof.mk_minvals(P)
pmax = prof.mk_maxvals(P)
if order == "auto":
valipol =mk_autoipol(P, V, CFG)
else:
valipol = Ipol(P, V, int(order))
## Check for NaN coeffs
import math
if any([math.isnan(x) for x in valipol.coeffs]):
raise NanError("NaN coefficient encountered in value ipol")
## Build the error interpolation(s)
if not errmode or errmode == "none":
erripols = None
## Build the error interpolation(s)
elif errmode == "mean":
meanerr = sum(E) / float(len(E)) #histos[run].bins[binnr].err for run in runs) / float(len(runs))
erripols = Ipol(P, [meanerr], 0) #< const 0th order interpolation
elif errmode == "median":
medianerr = sorted(E)[len(E)//2]
erripols = Ipol(P, [medianerr], 0) #< const 0th order interpolation
elif errmode == "symm":
if errorder == "auto":
erripols = mk_autoipol(P, E, CFG)
else:
erripols = Ipol(P, E, int(errorder))
elif errmode == "asymm":
raise Exception("Error interpolation mode 'asymm' not yet supported")
else:
raise Exception("Unknown error interpolation mode '%s'" % errmode)
if erripols is not None:
if any([math.isnan(x) for x in erripols.coeffs]):
raise NanError("NaN coefficient encountered in error ipol")
return IpolBin(xmin, xmax, valipol, erripols), pmin, pmax
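# Illustrative CFG sketch (key names taken from the accesses above; the values are
# assumptions, not defaults from the original code). P, V, E stand for the anchor
# points, values and errors of one bin:
#   CFG = {"ORDER": "auto", "ERR_ORDER": "auto", "ERR_MODE": "symm",
#          "MEDIAN_FILT": 0, "DEBUG": False}
#   ibin, pmin, pmax = mk_ipolbin(P, V, E, xmin, xmax, CFG)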
def mk_autoipol(P, V, CFG):
omin = CFG.get("AUTO_OMIN", 0)
omax = CFG.get("AUTO_OMAX", 99)
split = CFG.get("AUTO_SPLIT", 0.1)
nit = CFG.get("AUTO_NIT", 10)
debug = CFG.get("DEBUG", False)
# Indices of all inputs --- needed for shuffling
ALLI = range(len(P))
# Number of test points
NTEST=int(split*len(ALLI))
NTRAIN=len(ALLI)-NTEST
# Prepare training samples
trainings = [r for r in xrandomUniqueCombinations(ALLI, len(ALLI)-NTEST, nit)]
# Prepare test samples
tests = [[a for a in ALLI if not a in t] for t in trainings]
# Dimension of parameter space
DIM=len(P[0])
# Get possible orders
ORDERS=[]
o_temp=omin
while True:
n_temp = numCoeffs(DIM, o_temp)
if n_temp > NTRAIN or o_temp > omax:
break
ORDERS.append(o_temp)
o_temp+=1
residuals, meanresiduals, meanresidualstimesncoeff = {}, {}, {}
for o in ORDERS:
residuals[o] = []
# Iterate through training "samples"
for num, train in enumerate(trainings):
# Calculate ipol for this run combination
thisP = [P[x] for x in train]
thisV = [V[x] for x in train]
thisI = Ipol(thisP, thisV, o)
# Get the residuals for all test points
thisRes = [(thisI.val(P[x]) - V[x])**2 for x in tests[num]]
residuals[o].extend(thisRes)
from numpy import mean
for k, v in residuals.iteritems():
meanresiduals[k] = mean(v)
meanresidualstimesncoeff[k] = mean(v) * numCoeffs(DIM, k)
#winner=min(meanresiduals, key=meanresiduals.get)
winner=min(meanresidualstimesncoeff, key=meanresidualstimesncoeff.get)
if debug:
print "Residual summary:"
print "Choose order %i"%winner
for k, v in meanresiduals.iteritems():
print "%i: %e times %i coeffs = %e"%(k, v, numCoeffs(DIM, k), meanresidualstimesncoeff[k])
return Ipol(P, V, winner)
## Keep this for backward compatibility
def mk_ipolhisto(histos, runs, paramslist, order, errmode=None, errorder=None):
"""\
Make a prof.IpolHisto from a dict of prof.DataHistos and the corresponding
runs and params lists, at the given polynomial order.
If errs is non-null, the data histo errors will also be interpolated.
If errmode is None or 'none', uncertainties will not be parameterised and
will return 0 if queried; 'mean' and 'median' will use fixed values derived
from the anchor points; 'symm' will parameterise the average of the + and -
errors of each bin at the polynomial order given by errorder. If errorder is
None, the same order as for the value parameterisation will be used.
Parameter range scaling will be applied, so a DoParamScaling=true flag will
need to be written to the metadata when persisting the resulting IpolHisto.
"""
if errmode is None:
errmode = "none"
if errorder is None:
errorder = order
#
nbins = len(histos.itervalues().next().bins)
ibins = []
for n in xrange(nbins):
## Check that the bin edges are consistent and extract their values
# TODO: move bin edge consistency checking into the Histo base class
xmax = histos.values()[0].bins[n].xmax
xmin = histos.values()[0].bins[n].xmin
vals = [histos[run].bins[n].val for run in runs]
errs = [histos[run].bins[n].err for run in runs]
try:
# mk_ipolbin (above) expects a CFG dict and returns (IpolBin, pmin, pmax)
cfg = {"ORDER": order, "ERR_ORDER": errorder, "ERR_MODE": errmode,
"MEDIAN_FILT": 0, "DEBUG": False}
ib, _, _ = mk_ipolbin(paramslist, vals, errs, xmin, xmax, cfg)
ibins.append(ib)
except NanError, ne:
print ne, "in bin %i of %s" % (n, histos.values()[0].path)
return Histo(ibins, histos.values()[0].path)
# https://stackoverflow.com/questions/2130016/splitting-a-list-of-into-n-parts-of-approximately-equal-length
def chunkIt(seq, num):
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last):int(last + avg)])
last += avg
return out
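# Example (illustrative): chunkIt splits a sequence into num slices of roughly equal
# length, e.g. chunkIt(list(range(10)), 3) -> [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]].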
def mkStandardIpols(HISTOS, HNAMES, RUNS, PARAMSLIST, CFG, nchunks=10, referenceIpolSet=None, quiet=False):
"""
The referenceIpolSet allows making structurally identical ipols.
"""
BNAMES = []
for hn in HNAMES:
histos = HISTOS[hn]
nbins = histos.values()[0].nbins
for n in xrange(nbins):
BNAMES.append([hn, n])
if referenceIpolSet is not None:
b_order_val = referenceIpolSet[hn].bins[n].ival.order
b_order_err = referenceIpolSet[hn].bins[n].ierrs.order
BNAMES[-1].append(b_order_val)
BNAMES[-1].append(b_order_err)
NBINS = len(BNAMES)
MSGEVERY = int(NBINS/100.) if NBINS > 100 else 1;
import sys, zlib
import professor2 as prof
def worker(q, rdict, counter):
"Function to make bin ipols and store ipol persistency strings for each histo"
import sys
while True:
if q.empty():
break
try:
temp = q.get(False)
except:
break
hn = temp[0]
histos = HISTOS[hn]
n = temp[1]
xmax = histos.values()[0].bins[n].xmax
xmin = histos.values()[0].bins[n].xmin
from math import log
if CFG["LOGY"]:
vals = [log(histos[run].bins[n].val) if histos[run].bins[n].val>0 else 0 for run in RUNS]
errs = [log(histos[run].bins[n].err) if histos[run].bins[n].err>0 else -1 for run in RUNS]
else:
vals = [histos[run].bins[n].val for run in RUNS]
errs = [histos[run].bins[n].err for run in RUNS]
try:
if referenceIpolSet is not None:
CFG["ORDER"] = temp[2]
CFG["ERR_ORDER"]=temp[3]
ib, pmin, pmax = prof.mk_ipolbin(PARAMSLIST, vals, errs, xmin, xmax, CFG)
else:
ib, pmin, pmax = prof.mk_ipolbin(PARAMSLIST, vals, errs, xmin, xmax, CFG)
s = ""
s += "%s#%d %.5e %.5e\n" % (hn, n, ib.xmin, ib.xmax)
s += " " + ib.ival.toString("val")
for v in pmin:
s+= " %.5e"%v
for v in pmax:
s+= " %.5e"%v
s+= "\n"
if ib.ierrs:
s += " " + ib.ierrs.toString("err")
for v in pmin:
s+= " %.5e"%v
for v in pmax:
s+= " %.5e"%v
s+= "\n"
rdict.put( [hn,n, zlib.compress(s, 9)])
del s
del ib #< pro-actively clear up memory
except NanError, ne:
print ne, "in bin %i of %s" % (n, histos.values()[0].path)
del histos
counter.value += 1
if counter.value == MSGEVERY and not quiet:
counter.value = 0
sys.stderr.write('\rProgress: {current}/{total}\r'.format(current=rdict.qsize(), total=NBINS))
q.task_done()
return
# TODO: Printing and multiprocessing should happen under script control
if not CFG["QUIET"]:
print "\nParametrising %i objects...\n" % len(BNAMES)
import time, multiprocessing
rDict = {}
time1 = time.time()
from multiprocessing import Manager, Process
manager = Manager()
# This is for the status --- modulus is too expensive
ndone = manager.Value('i', 0)
## A shared memory object is required for coefficient retrieval
r = manager.Queue()
# # For testing with IPython embed --- leave the following 4 lines in please
# q = manager.Queue()
# for chunk in chunkIt(BNAMES, nchunks): # The chunking is necessary as the memory blows up otherwise
# map(lambda x:q.put(x), chunk)
# worker(q, r , ndone)
for chunk in chunkIt(BNAMES, nchunks): # The chunking is necessary as the memory blows up otherwise
## The job queue
q = manager.Queue()
## Fire away
workers = [Process(target=worker, args=(q, r, ndone)) for i in xrange(CFG["MULTI"])]
map(lambda x:q.put(x), chunk)
map(lambda x:x.start(), workers)
map(lambda x:x.join(), workers)
# repo: l9c/flask-limiter
# file: tests/test_decorators.py
import functools
from functools import wraps
import hiro
import mock
from flask import Blueprint, request, current_app, Flask, g
from werkzeug.exceptions import BadRequest
from flask_limiter import Limiter
from flask_limiter.util import get_ipaddr, get_remote_address
def test_multiple_decorators(extension_factory):
app, limiter = extension_factory(key_func=get_ipaddr)
@app.route("/t1")
@limiter.limit(
"100 per minute", lambda: "test"
) # effectively becomes a limit for all users
@limiter.limit("50/minute") # per ip as per default key_func
def t1():
return "test"
with hiro.Timeline().freeze():
with app.test_client() as cli:
for i in range(0, 100):
assert (200 if i < 50 else 429) == cli.get(
"/t1", headers={
"X_FORWARDED_FOR": "127.0.0.2"
}
).status_code
for i in range(50):
assert 200 == cli.get("/t1").status_code
assert 429 == cli.get("/t1").status_code
assert 429 == \
cli.get("/t1", headers={
"X_FORWARDED_FOR": "127.0.0.3"
}).status_code
def test_exempt_routes(extension_factory):
app, limiter = extension_factory(default_limits=["1/minute"])
@app.route("/t1")
def t1():
return "test"
@app.route("/t2")
@limiter.exempt
def t2():
return "test"
with app.test_client() as cli:
assert cli.get("/t1").status_code == 200
assert cli.get("/t1").status_code == 429
assert cli.get("/t2").status_code == 200
assert cli.get("/t2").status_code == 200
def test_decorated_limit_with_conditional_deduction(extension_factory):
app, limiter = extension_factory()
@app.route("/t/<path:path>")
@limiter.limit(
"1/second", deduct_when=lambda resp: resp.status_code == 200
)
@limiter.limit(
"1/minute", deduct_when=lambda resp: resp.status_code == 400
)
def t(path):
if path == "1":
return "test"
raise BadRequest()
with hiro.Timeline() as timeline:
with app.test_client() as cli:
assert cli.get("/t/1").status_code == 200
assert cli.get("/t/1").status_code == 429
timeline.forward(1)
assert cli.get("/t/2").status_code == 400
timeline.forward(1)
assert cli.get("/t/1").status_code == 429
assert cli.get("/t/2").status_code == 429
timeline.forward(60)
assert cli.get("/t/1").status_code == 200
def test_shared_limit_with_conditional_deduction(extension_factory):
app, limiter = extension_factory()
bp = Blueprint("main", __name__)
limit = limiter.shared_limit(
"2/minute", "not_found",
deduct_when=lambda response: response.status_code == 400
)
@app.route("/test/<path:path>")
@limit
def app_test(path):
if path != "1":
raise BadRequest()
return path
@bp.route("/test/<path:path>")
def bp_test(path):
if path != "1":
raise BadRequest()
return path
limit(bp)
app.register_blueprint(bp, url_prefix='/bp')
with hiro.Timeline() as timeline:
with app.test_client() as cli:
assert cli.get("/bp/test/1").status_code == 200
assert cli.get("/bp/test/1").status_code == 200
assert cli.get("/test/1").status_code == 200
assert cli.get("/bp/test/2").status_code == 400
assert cli.get("/test/2").status_code == 400
assert cli.get("/bp/test/2").status_code == 429
assert cli.get("/bp/test/1").status_code == 429
assert cli.get("/test/1").status_code == 429
assert cli.get("/test/2").status_code == 429
timeline.forward(60)
assert cli.get("/bp/test/1").status_code == 200
assert cli.get("/test/1").status_code == 200
def test_header_ordering_with_conditional_deductions(extension_factory):
app, limiter = extension_factory(
default_limits=['3/second'], headers_enabled=True
)
@app.route("/test_combined/<path:path>")
@limiter.limit(
"1/hour", override_defaults=False,
deduct_when=lambda response: response.status_code != 200
)
@limiter.limit(
"4/minute", override_defaults=False,
deduct_when=lambda response: response.status_code == 200
)
def app_test_combined(path):
if path != "1":
raise BadRequest()
return path
@app.route("/test/<path:path>")
@limiter.limit(
"2/hour", deduct_when=lambda response: response.status_code != 200
)
def app_test(path):
if path != "1":
raise BadRequest()
return path
with hiro.Timeline() as timeline:
with app.test_client() as cli:
assert cli.get("/test_combined/1").status_code == 200
resp = cli.get("/test_combined/1")
assert resp.status_code == 200
assert resp.headers.get('X-RateLimit-Limit') == '3'
assert resp.headers.get('X-RateLimit-Remaining') == '1'
assert cli.get("/test_combined/2").status_code == 400
resp = cli.get("/test/1")
assert resp.headers.get('X-RateLimit-Limit') == '2'
assert resp.headers.get('X-RateLimit-Remaining') == '2'
resp = cli.get("/test/2")
assert resp.headers.get('X-RateLimit-Limit') == '2'
assert resp.headers.get('X-RateLimit-Remaining') == '1'
resp = cli.get("/test_combined/1")
assert resp.status_code == 429
assert resp.headers.get('X-RateLimit-Limit') == '1'
assert resp.headers.get('X-RateLimit-Remaining') == '0'
assert cli.get("/test_combined/2").status_code == 429
timeline.forward(60)
assert cli.get("/test_combined/1").status_code == 429
assert cli.get("/test_combined/2").status_code == 429
timeline.forward(3600)
assert cli.get("/test_combined/1").status_code == 200
def test_decorated_limits_with_combined_defaults(extension_factory):
app, limiter = extension_factory(
default_limits=['2/minute']
)
@app.route("/")
@limiter.limit("1/second", override_defaults=False)
def root():
return "root"
with hiro.Timeline() as timeline:
with app.test_client() as cli:
assert 200 == cli.get("/").status_code
assert 429 == cli.get("/").status_code
timeline.forward(60)
assert 200 == cli.get("/").status_code
timeline.forward(1)
assert 200 == cli.get("/").status_code
timeline.forward(1)
assert 429 == cli.get("/").status_code
def test_decorated_limit_with_combined_defaults_per_method(extension_factory):
app, limiter = extension_factory(
default_limits=['2/minute'],
default_limits_per_method=True
)
@app.route("/", methods=['GET', 'PUT'])
@limiter.limit("1/second", override_defaults=False, methods=['GET'])
def root():
return "root"
with hiro.Timeline() as timeline:
with app.test_client() as cli:
assert 200 == cli.get("/").status_code
assert 429 == cli.get("/").status_code
assert 200 == cli.put("/").status_code
assert 200 == cli.put("/").status_code
assert 429 == cli.put("/").status_code
timeline.forward(60)
assert 200 == cli.get("/").status_code
assert 200 == cli.put("/").status_code
timeline.forward(1)
assert 200 == cli.get("/").status_code
assert 200 == cli.put("/").status_code
timeline.forward(1)
assert 429 == cli.get("/").status_code
assert 429 == cli.put("/").status_code
def test_decorated_dynamic_limits(extension_factory):
app, limiter = extension_factory(
{"X": "2 per second"}, default_limits=["1/second"]
)
def request_context_limit():
limits = {
"127.0.0.1": "10 per minute",
"127.0.0.2": "1 per minute"
}
remote_addr = (request.access_route and request.access_route[0]
) or request.remote_addr or '127.0.0.1'
limit = limits.setdefault(remote_addr, '1 per minute')
return limit
@app.route("/t1")
@limiter.limit("20/day")
@limiter.limit(lambda: current_app.config.get("X"))
@limiter.limit(request_context_limit)
def t1():
return "42"
@app.route("/t2")
@limiter.limit(lambda: current_app.config.get("X"))
def t2():
return "42"
R1 = {"X_FORWARDED_FOR": "127.0.0.1, 127.0.0.0"}
R2 = {"X_FORWARDED_FOR": "127.0.0.2"}
with app.test_client() as cli:
with hiro.Timeline().freeze() as timeline:
for i in range(0, 10):
assert cli.get("/t1", headers=R1).status_code == 200
timeline.forward(1)
assert cli.get("/t1", headers=R1).status_code == 429
assert cli.get("/t1", headers=R2).status_code == 200
assert cli.get("/t1", headers=R2).status_code == 429
timeline.forward(60)
assert cli.get("/t1", headers=R2).status_code == 200
assert cli.get("/t2").status_code == 200
assert cli.get("/t2").status_code == 200
assert cli.get("/t2").status_code == 429
timeline.forward(1)
assert cli.get("/t2").status_code == 200
def test_invalid_decorated_dynamic_limits(caplog):
app = Flask(__name__)
app.config.setdefault("X", "2 per sec")
limiter = Limiter(
app, default_limits=["1/second"], key_func=get_remote_address
)
@app.route("/t1")
@limiter.limit(lambda: current_app.config.get("X"))
def t1():
return "42"
with app.test_client() as cli:
with hiro.Timeline().freeze():
assert cli.get("/t1").status_code == 200
assert cli.get("/t1").status_code == 429
# 2 for invalid limit, 1 for warning.
assert len(caplog.records) == 3
assert (
"failed to load ratelimit"
in caplog.records[0].msg
)
assert (
"failed to load ratelimit"
in caplog.records[1].msg
)
assert (
"exceeded at endpoint"
in caplog.records[2].msg
)
assert caplog.records[2].levelname == 'WARNING'
def test_invalid_decorated_static_limits(caplog):
app = Flask(__name__)
limiter = Limiter(
app, default_limits=["1/second"], key_func=get_remote_address
)
@app.route("/t1")
@limiter.limit("2/sec")
def t1():
return "42"
with app.test_client() as cli:
with hiro.Timeline().freeze():
assert cli.get("/t1").status_code == 200
assert cli.get("/t1").status_code == 429
assert (
"failed to configure"
in caplog.records[0].msg
)
assert (
"exceeded at endpoint"
in caplog.records[1].msg
)
def test_named_shared_limit(extension_factory):
app, limiter = extension_factory()
shared_limit_a = limiter.shared_limit("1/minute", scope='a')
shared_limit_b = limiter.shared_limit("1/minute", scope='b')
@app.route("/t1")
@shared_limit_a
def route1():
return "route1"
@app.route("/t2")
@shared_limit_a
def route2():
return "route2"
@app.route("/t3")
@shared_limit_b
def route3():
return "route3"
with hiro.Timeline().freeze():
with app.test_client() as cli:
assert 200 == cli.get("/t1").status_code
assert 200 == cli.get("/t3").status_code
assert 429 == cli.get("/t2").status_code
def test_dynamic_shared_limit(extension_factory):
app, limiter = extension_factory()
fn_a = mock.Mock()
fn_b = mock.Mock()
fn_a.return_value = "foo"
fn_b.return_value = "bar"
dy_limit_a = limiter.shared_limit("1/minute", scope=fn_a)
dy_limit_b = limiter.shared_limit("1/minute", scope=fn_b)
@app.route("/t1")
@dy_limit_a
def t1():
return "route1"
@app.route("/t2")
@dy_limit_a
def t2():
return "route2"
@app.route("/t3")
@dy_limit_b
def t3():
return "route3"
with hiro.Timeline().freeze():
with app.test_client() as cli:
assert 200 == cli.get("/t1").status_code
assert 200 == cli.get("/t3").status_code
assert 429 == cli.get("/t2").status_code
assert 429 == cli.get("/t3").status_code
assert 2 == fn_a.call_count
assert 2 == fn_b.call_count
fn_b.assert_called_with("t3")
fn_a.assert_has_calls([mock.call("t1"), mock.call("t2")])
def test_conditional_limits():
"""Test that the conditional activation of the limits work."""
app = Flask(__name__)
limiter = Limiter(app, key_func=get_remote_address)
@app.route("/limited")
@limiter.limit("1 per day")
def limited_route():
return "passed"
@app.route("/unlimited")
@limiter.limit("1 per day", exempt_when=lambda: True)
def never_limited_route():
return "should always pass"
is_exempt = False
@app.route("/conditional")
@limiter.limit("1 per day", exempt_when=lambda: is_exempt)
def conditionally_limited_route():
return "conditional"
with app.test_client() as cli:
assert cli.get("/limited").status_code == 200
assert cli.get("/limited").status_code == 429
assert cli.get("/unlimited").status_code == 200
assert cli.get("/unlimited").status_code == 200
assert cli.get("/conditional").status_code == 200
assert cli.get("/conditional").status_code == 429
is_exempt = True
assert cli.get("/conditional").status_code == 200
is_exempt = False
assert cli.get("/conditional").status_code == 429
def test_conditional_shared_limits():
"""Test that conditional shared limits work."""
app = Flask(__name__)
limiter = Limiter(app, key_func=get_remote_address)
@app.route("/limited")
@limiter.shared_limit("1 per day", "test_scope")
def limited_route():
return "passed"
@app.route("/unlimited")
@limiter.shared_limit(
"1 per day", "test_scope", exempt_when=lambda: True
)
def never_limited_route():
return "should always pass"
is_exempt = False
@app.route("/conditional")
@limiter.shared_limit(
"1 per day", "test_scope", exempt_when=lambda: is_exempt
)
def conditionally_limited_route():
return "conditional"
with app.test_client() as cli:
assert cli.get("/unlimited").status_code == 200
assert cli.get("/unlimited").status_code == 200
assert cli.get("/limited").status_code == 200
assert cli.get("/limited").status_code == 429
assert cli.get("/conditional").status_code == 429
is_exempt = True
assert cli.get("/conditional").status_code == 200
is_exempt = False
assert cli.get("/conditional").status_code == 429
def test_whitelisting():
app = Flask(__name__)
limiter = Limiter(
app,
default_limits=["1/minute"],
headers_enabled=True,
key_func=get_remote_address
)
@app.route("/")
def t():
return "test"
@limiter.request_filter
def w():
if request.headers.get("internal", None) == "true":
return True
return False
with hiro.Timeline().freeze() as timeline:
with app.test_client() as cli:
assert cli.get("/").status_code == 200
assert cli.get("/").status_code == 429
timeline.forward(60)
assert cli.get("/").status_code == 200
for i in range(0, 10):
assert cli.get(
"/", headers={"internal": "true"}
).status_code == 200
def test_separate_method_limits(extension_factory):
app, limiter = extension_factory()
@limiter.limit("1/second", per_method=True)
@app.route("/", methods=["GET", "POST"])
def root():
return "root"
with hiro.Timeline():
with app.test_client() as cli:
assert 200 == cli.get("/").status_code
assert 429 == cli.get("/").status_code
assert 200 == cli.post("/").status_code
assert 429 == cli.post("/").status_code
def test_explicit_method_limits(extension_factory):
app, limiter = extension_factory(default_limits=['2/second'])
@app.route("/", methods=["GET", "POST"])
@limiter.limit("1/second", methods=["GET"])
def root():
return "root"
with hiro.Timeline():
with app.test_client() as cli:
assert 200 == cli.get("/").status_code
assert 429 == cli.get("/").status_code
assert 200 == cli.post("/").status_code
assert 200 == cli.post("/").status_code
assert 429 == cli.post("/").status_code
def test_decorated_limit_immediate(extension_factory):
app, limiter = extension_factory(default_limits=["1/minute"])
def append_info(fn):
@wraps(fn)
def __inner(*args, **kwargs):
g.rate_limit = "2/minute"
return fn(*args, **kwargs)
# file: boco_survey/survey_analyses.py
from collections import defaultdict, Counter
from typing import Dict, Tuple, List
import pandas as pd
import numpy as np
import scipy.stats
from statsmodels.stats.inter_rater import fleiss_kappa
def fleiss_kappa_self(M):
"""Computes Fleiss' kappa for group of annotators.
:param M: a matrix of shape (:attr:'N', :attr:'k') with 'N' = number of subjects (items)
and 'k' = the number of categories (ratings).
'M[i, j]' represent the number of raters who assigned the 'i'th subject to the 'j'th category.
:type: numpy matrix
:rtype: float
:return: Fleiss' kappa score
"""
N, k = M.shape # N is # of items, k is # of categories
n_annotators = float(np.sum(M[0, :])) # # of annotators
tot_annotations = N * n_annotators # the total # of annotations
# print(n_annotators, tot_annotations)
category_sum = np.sum(M, axis=0) # the sum of each category over all items
# chance agreement
p = category_sum / tot_annotations # the distribution of each category over all annotations
PbarE = np.sum(p * p) # average chance agreement over all categories
# observed agreement
P = (np.sum(M * M, axis=1) - n_annotators) / (n_annotators * (n_annotators - 1))
Pbar = np.sum(P) / N # add all observed agreement chances per item and divide by amount of items
return round((Pbar - PbarE) / (1 - PbarE), 4)
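# Illustrative example (not part of the original module): three raters, four items,
# two categories, perfect agreement on every item gives a kappa of 1.0:
#   fleiss_kappa_self(np.array([[3, 0], [0, 3], [3, 0], [0, 3]]))  # -> 1.0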
def annswer_to_kappa_helper(answers_as_list, values):
unique, counts = np.unique(answers_as_list, return_counts=True)
count_dict = dict(zip(unique, counts))
ratings = []
for value in values:
if value in count_dict:
ratings.append(count_dict[value])
else:
ratings.append(0)
return ratings
def answers_to_kappa_matrix(answers_as_list):
values = [0.0, 0.25, 0.45, 0.5, 0.55, 0.75, 1]
unique, counts = np.unique(answers_as_list, return_counts=True)
count_dict = dict(zip(unique, counts))
ratings = annswer_to_kappa_helper(answers_as_list, values)
kappa_matrix = np.array(ratings).reshape(1, len(values))
# print(kappa_matrix)
return kappa_matrix
def kappa_matrix_of_answer_dict(answ_dict: Dict[Tuple[str, str], List[float]]):
values = [0.0, 0.25, 0.45, 0.5, 0.55, 0.75, 1]
kappa_matrix = []
for entry, answs in answ_dict.items():
answs = [answer for answer in answs if answer > 0]
ratings = annswer_to_kappa_helper(answs, values)
# print('55', answs, ratings)
kappa_matrix.append(np.array(ratings))
kappa_matrix = np.array(kappa_matrix)
# print('55', answer_dict, kappa_matrix)
return kappa_matrix
def generate_facet_column_names(facet_name:str):
def increment_str(inp):
if inp == "01":
return "02"
if inp == "03":
return "04"
if inp == "05":
return "06"
if inp == "07":
return "08"
if inp == "09":
return "10"
base_suffixes = ["01", "03", "05", "07", "09"]
res = {}
for base_suffix in base_suffixes:
res[f'{facet_name}{base_suffix}'] = (f'{facet_name}{increment_str(base_suffix)}_01',
f'{facet_name}{increment_str(base_suffix)}_02',
f'{facet_name}{increment_str(base_suffix)}_03')
return res
def selection_map(book1: str, book2: str, book3: str, selection):
if selection == -10 or selection == -10.0 or selection == "-10" or selection == "-10.0" or \
selection == -9 or selection == -9.0 or selection == "-9" or selection == "-9.0":
return "skip"
elif selection == 1 or selection == 1.0 or selection == "1" or selection == "1.0":
return f'1|{book1}=={book2}'
elif selection == 2 or selection == 2.0 or selection == "2" or selection == "2.0":
return f'2|{book1}=={book3}'
elif selection == 3 or selection == 3.0 or selection == "3" or selection == "3.0":
return f'3|{book2}=={book3}'
else:
raise UserWarning(f"No matches for {selection}!")
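# Example (illustrative): selection_map("A", "B", "C", 1) -> "1|A==B",
# selection_map("A", "B", "C", "-10") -> "skip".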
def category_ranks(rating):
if '|' in rating:
return int(rating.split('|')[0])
else:
return 0
def kappa_score_single(list_of_ratings: List[str]):
category_counter = {0: 0, 1: 0, 2: 0, 3: 0}
mapped_ranks = [category_ranks(rating) for rating in list_of_ratings]
for category in mapped_ranks:
category_counter[category] += 1
frequencies = list(category_counter.values())
matrix = np.array(frequencies).reshape(1, len(frequencies))
for frequency in frequencies:
if sum(frequencies) == frequency:
return 1.0, frequencies
# print(matrix, matrix.shape)
return fleiss_kappa_self(matrix), frequencies
def majority_vote(list_of_ratings: List[Tuple[str, str]], facet_name: str = None):
confidence = 1.0
ratings = 1
if len(set(list_of_ratings)) < 2:
return list_of_ratings[0][0], list_of_ratings[0][1], confidence, ratings
counter = Counter(list_of_ratings)
most_common = counter.most_common(2)
ratings = len(list_of_ratings)
confidence = counter[most_common[0][0]] / ratings
decision = most_common[0][0]
if most_common[0][1] == most_common[1][1]: # or counter[most_common[0][0]] - 1 == counter[most_common[1][0]]:
sel = "unsure"
answer_nr = -1
else:
sel = decision[0]
answer_nr = decision[1]
if facet_name:
print('-----------------')
print(facet_name, counter, sel, answer_nr, confidence, ratings)
print('-----------------')
return sel, answer_nr, confidence, ratings
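# Usage sketch (added, hypothetical ratings): with three ratings
#   [("1|a==b", 1), ("1|a==b", 1), ("2|a==c", 2)]
# majority_vote returns ("1|a==b", 1, 2/3, 3); a 2-2 tie between two
# different selections instead yields ("unsure", -1, 0.5, 4).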
def group_kappa_for_df(tri_df: pd.DataFrame, kappa_column: str = None):
tuple_dict = defaultdict(list)
voted_dict = {}
if kappa_column is None:
for i, row in tri_df.iterrows():
tuple_dict[(row["Book 1"],
row["Book 2"],
row["Book 3"],
row["Facet"])].append((row["Selection"], row["Answer Nr."]))
kappa_multi = defaultdict(list)
for (book_1, book_2, book3, facet_name), values in tuple_dict.items():
# print(key, len(values))
kappa_s, frequencies = kappa_score_single(values)
kappa_multi["all"].append(np.array(frequencies))
voted = majority_vote(values, facet_name)
# print(kappa_s)
# print(book_1, book_2, book3, facet_name, voted)
voted_dict[(book_1, book_2, book3, facet_name)] = voted
else:
for i, row in tri_df.iterrows():
tuple_dict[(row["Book 1"],
row["Book 2"],
row["Book 3"],
row["Facet"],
row[kappa_column])].append((row["Selection"], row["Answer Nr."]))
kappa_multi = defaultdict(list)
for (book_1, book_2, book3, facet_name, col), values in tuple_dict.items():
# print(key, len(values))
kappa_s, frequencies = kappa_score_single(values)
kappa_multi[col].append(np.array(frequencies))
voted = majority_vote(values, facet_name)
# print(kappa_s)
voted_dict[(book_1, book_2, book3, facet_name)] = voted
d = {}
    for kappa_column_key, kappa_values in kappa_multi.items():
        kappa_arr = np.array(kappa_values)
        print(kappa_column_key, kappa_arr)
        d[kappa_column_key] = fleiss_kappa_self(kappa_arr)
return d, voted_dict
def group_kappa_new(tri_df: pd.DataFrame):
tuple_dict = defaultdict(lambda: defaultdict(list))
voted_dict = {}
for i, row in tri_df.iterrows():
tuple_dict[(row["Book 1"],
row["Book 2"],
row["Book 3"],
row["Facet"]
)][row["Group"]].append((row["Selection"], row["Answer Nr."]))
print('Before:', len(tuple_dict))
tuple_dict = {keys: values for keys, values in tuple_dict.items() if len(values) > 1}
print('After:', len(tuple_dict))
agreements = []
facet_aggreement = defaultdict(list)
for keys, values in tuple_dict.items():
group_1 = majority_vote(values[1])
group_2 = majority_vote(values[2])
agreement = False
if group_1[0] == group_2[0]: #or group_1[0] == "skip" or group_2[0] == "skip" or group_1[0] == "unsure" or group_2[0] == "unsure":
agreement = True
if group_1[2] < 0.5 and group_2[2] < 0.5:
agreement = True
if group_1[0] == "skip" or group_2[0] == "skip" or group_1[0] == "unsure" or group_2[0] == "unsure":
continue
agreements.append(agreement)
print(keys, agreement, group_1, group_2)
facet_aggreement[keys[-1]].append(agreement)
print(len(agreements))
print(len([agreement for agreement in agreements if agreement]) / len(agreements))
for facet, scores in facet_aggreement.items():
print(facet, len([agreement for agreement in scores if agreement]) / len(scores))
# kappa_multi = defaultdict(list)
# for (book_1, book_2, book3, facet_name, col), values in tuple_dict.items():
# # print(key, len(values))
# kappa_s, frequencies = kappa_score_single(values)
# kappa_multi[col].append(np.array(frequencies))
# voted = majority_vote(values, facet_name)
# # print(kappa_s)
# voted_dict[(book_1, book_2, book3, facet_name)] = voted
# d = {}
# for kappa_column_key, kappa_values in kappa_multi.items():
# kappa_multi = np.array(kappa_values)
# print(kappa_column_key, kappa_multi)
# d[kappa_column_key] = fleiss_kappa_self(kappa_multi)
#
# return d, voted_dict
def facet_kappa_for_df(tri_df: pd.DataFrame):
tuple_dict = defaultdict(list)
for i, row in tri_df.iterrows():
tuple_dict[(row["Book 1"],
row["Book 2"],
row["Book 3"],
row["Facet"])].append(row["Selection"])
kappa_multi = defaultdict(list)
for (book_1, book_2, book3, facet_name), values in tuple_dict.items():
# print(key, len(values))
kappa_s, frequencies = kappa_score_single(values)
kappa_multi[facet_name].append(np.array(frequencies))
d = {}
    for kappa_column_key, kappa_values in kappa_multi.items():
        kappa_arr = np.array(kappa_values)
        d[kappa_column_key] = fleiss_kappa_self(kappa_arr)
return d
if __name__ == "__main__":
df = pd.read_csv("../data_websci_2021-03-25_11-23.csv", delimiter='\t', encoding="utf-16")
df = df.fillna(-10)
comparison_suffix_mapping = {
"01": "time",
"02": "location",
"03": "content",
"04": "plot",
"05": "atmosphere",
"06": "total",
}
books_answer_mapping = {
1: "unknown",
2: "known",
-10: "unknown"
}
comparison_answer_mapping = {
1: 0.0,
2.0: 0.25,
3.0: 0.5,
4.0: 0.75,
5: 1.0,
-1: 0.45,
-2: 0.55,
-9: -1.0,
-10: -1.0,
}
books_mapping = [["Uncle Tom's Cabin by <NAME>", "<NAME> von <NAME>",
'CP08_01'],
['A Tale of Two Cities by <NAME>', 'Eine Geschichte aus zwei Städten von <NAME>',
'CP08_02'],
['Adventures of Huckleberry Finn by <NAME>', 'Die Abenteuer des Huckleberry Finn von <NAME>',
'CP08_03'],
['Alice’s Adventures in Wonderland by <NAME>', 'Alice im Wunderland von L<NAME>',
'CP08_04'],
['Dracula by <NAME>', 'Dracula von <NAME>',
'CP08_05'],
['Emma by <NAME>', 'Emma von J<NAME>usten',
'CP08_06'],
['Frankenstein by <NAME>', 'Frankenstein; oder: Der moderne Prometheus von <NAME>ley',
'CP08_07'],
['Great Expectations by <NAME>', 'Große Erwartungen von <NAME>',
'CP08_08'],
['Metamorphosis by <NAME>', 'Die Verwandlung von Franz Kafka',
'CP08_09'],
['Pride and Prejudice by <NAME>', 'Stolz und Vorurteil von J<NAME>',
'CP08_10'],
['The Adventures of Sherlock Holmes by <NAME>',
'Die Abenteuer des Sherlock Holmes von <NAME>',
'CP08_11'],
['The Adventures of Tom Sawyer by <NAME>', 'Die Abenteuer des Tom Sawyer von <NAME>',
'CP08_12'],
['The Count of Monte Cristo by <NAME>', 'Der Graf von Monte Christo von <NAME>as',
'CP08_13'],
['The Picture of Dorian Gray by <NAME>', 'Das Bildnis des Dorian Gray von Oscar Wilde',
'CP08_14'],
['Little Women by <NAME>', 'Little Women von <NAME>',
'CP08_15'],
['Heart of Darkness by <NAME>', '<NAME> von <NAME>',
'CP08_16'],
['Moby Dick by <NAME>', 'Moby-Dick; oder: Der Wal von <NAME>',
'CP08_17'],
['War and Peace by <NAME>', 'Krieg und Frieden von Le<NAME>stoy',
'CP08_18'],
['Wuthering Heights by <NAME>', 'Sturmhöhe von <NAME>',
'CP08_19'],
    ['Treasure Island by <NAME>',
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
Original C++ source file: io_ops.cc
"""
import collections
from tensorflow.python import pywrap_tfe as pywrap_tfe
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.deprecation import deprecated_endpoints
from tensorflow.python.util import dispatch as _dispatch
from tensorflow.python.util.tf_export import tf_export
from typing import TypeVar
def fixed_length_record_reader(record_bytes, header_bytes=0, footer_bytes=0, hop_bytes=0, container="", shared_name="", name=None):
r"""A Reader that outputs fixed-length records from a file.
Args:
record_bytes: An `int`. Number of bytes in the record.
header_bytes: An optional `int`. Defaults to `0`.
Number of bytes in the header, defaults to 0.
footer_bytes: An optional `int`. Defaults to `0`.
Number of bytes in the footer, defaults to 0.
hop_bytes: An optional `int`. Defaults to `0`.
Number of bytes to hop before each read. Default of 0 means using
record_bytes.
container: An optional `string`. Defaults to `""`.
If non-empty, this reader is placed in the given container.
Otherwise, a default container is used.
shared_name: An optional `string`. Defaults to `""`.
If non-empty, this reader is named in the given bucket
with this shared_name. Otherwise, the node name is used instead.
name: A name for the operation (optional).
Returns:
A `Tensor` of type mutable `string`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
raise RuntimeError("fixed_length_record_reader op does not support eager execution. Arg 'reader_handle' is a ref.")
# Add nodes to the TensorFlow graph.
record_bytes = _execute.make_int(record_bytes, "record_bytes")
if header_bytes is None:
header_bytes = 0
header_bytes = _execute.make_int(header_bytes, "header_bytes")
if footer_bytes is None:
footer_bytes = 0
footer_bytes = _execute.make_int(footer_bytes, "footer_bytes")
if hop_bytes is None:
hop_bytes = 0
hop_bytes = _execute.make_int(hop_bytes, "hop_bytes")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"FixedLengthRecordReader", record_bytes=record_bytes,
header_bytes=header_bytes,
footer_bytes=footer_bytes,
hop_bytes=hop_bytes, container=container,
shared_name=shared_name, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("header_bytes", _op._get_attr_int("header_bytes"),
"record_bytes", _op._get_attr_int("record_bytes"),
"footer_bytes", _op._get_attr_int("footer_bytes"), "hop_bytes",
_op._get_attr_int("hop_bytes"), "container",
_op.get_attr("container"), "shared_name",
_op.get_attr("shared_name"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"FixedLengthRecordReader", _inputs_flat, _attrs, _result)
_result, = _result
return _result
FixedLengthRecordReader = tf_export("raw_ops.FixedLengthRecordReader")(_ops.to_raw_op(fixed_length_record_reader))
def fixed_length_record_reader_eager_fallback(record_bytes, header_bytes, footer_bytes, hop_bytes, container, shared_name, name, ctx):
raise RuntimeError("fixed_length_record_reader op does not support eager execution. Arg 'reader_handle' is a ref.")
def fixed_length_record_reader_v2(record_bytes, header_bytes=0, footer_bytes=0, hop_bytes=0, container="", shared_name="", encoding="", name=None):
r"""A Reader that outputs fixed-length records from a file.
Args:
record_bytes: An `int`. Number of bytes in the record.
header_bytes: An optional `int`. Defaults to `0`.
Number of bytes in the header, defaults to 0.
footer_bytes: An optional `int`. Defaults to `0`.
Number of bytes in the footer, defaults to 0.
hop_bytes: An optional `int`. Defaults to `0`.
Number of bytes to hop before each read. Default of 0 means using
record_bytes.
container: An optional `string`. Defaults to `""`.
If non-empty, this reader is placed in the given container.
Otherwise, a default container is used.
shared_name: An optional `string`. Defaults to `""`.
If non-empty, this reader is named in the given bucket
with this shared_name. Otherwise, the node name is used instead.
encoding: An optional `string`. Defaults to `""`.
The type of encoding for the file. Currently ZLIB and GZIP
are supported. Defaults to none.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `resource`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "FixedLengthRecordReaderV2", name, "header_bytes", header_bytes,
"record_bytes", record_bytes, "footer_bytes", footer_bytes,
"hop_bytes", hop_bytes, "container", container, "shared_name",
shared_name, "encoding", encoding)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return fixed_length_record_reader_v2_eager_fallback(
header_bytes=header_bytes, record_bytes=record_bytes,
footer_bytes=footer_bytes, hop_bytes=hop_bytes, container=container,
shared_name=shared_name, encoding=encoding, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
record_bytes = _execute.make_int(record_bytes, "record_bytes")
if header_bytes is None:
header_bytes = 0
header_bytes = _execute.make_int(header_bytes, "header_bytes")
if footer_bytes is None:
footer_bytes = 0
footer_bytes = _execute.make_int(footer_bytes, "footer_bytes")
if hop_bytes is None:
hop_bytes = 0
hop_bytes = _execute.make_int(hop_bytes, "hop_bytes")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
if encoding is None:
encoding = ""
encoding = _execute.make_str(encoding, "encoding")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"FixedLengthRecordReaderV2", record_bytes=record_bytes,
header_bytes=header_bytes,
footer_bytes=footer_bytes,
hop_bytes=hop_bytes, container=container,
shared_name=shared_name,
encoding=encoding, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("header_bytes", _op._get_attr_int("header_bytes"),
"record_bytes", _op._get_attr_int("record_bytes"),
"footer_bytes", _op._get_attr_int("footer_bytes"), "hop_bytes",
_op._get_attr_int("hop_bytes"), "container",
_op.get_attr("container"), "shared_name",
_op.get_attr("shared_name"), "encoding",
_op.get_attr("encoding"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"FixedLengthRecordReaderV2", _inputs_flat, _attrs, _result)
_result, = _result
return _result
FixedLengthRecordReaderV2 = tf_export("raw_ops.FixedLengthRecordReaderV2")(_ops.to_raw_op(fixed_length_record_reader_v2))
def fixed_length_record_reader_v2_eager_fallback(record_bytes, header_bytes, footer_bytes, hop_bytes, container, shared_name, encoding, name, ctx):
record_bytes = _execute.make_int(record_bytes, "record_bytes")
if header_bytes is None:
header_bytes = 0
header_bytes = _execute.make_int(header_bytes, "header_bytes")
if footer_bytes is None:
footer_bytes = 0
footer_bytes = _execute.make_int(footer_bytes, "footer_bytes")
if hop_bytes is None:
hop_bytes = 0
hop_bytes = _execute.make_int(hop_bytes, "hop_bytes")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
if encoding is None:
encoding = ""
encoding = _execute.make_str(encoding, "encoding")
_inputs_flat = []
_attrs = ("header_bytes", header_bytes, "record_bytes", record_bytes,
"footer_bytes", footer_bytes, "hop_bytes", hop_bytes, "container",
container, "shared_name", shared_name, "encoding", encoding)
_result = _execute.execute(b"FixedLengthRecordReaderV2", 1,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"FixedLengthRecordReaderV2", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def identity_reader(container="", shared_name="", name=None):
r"""A Reader that outputs the queued work as both the key and value.
To use, enqueue strings in a Queue. ReaderRead will take the front
work string and output (work, work).
Args:
container: An optional `string`. Defaults to `""`.
If non-empty, this reader is placed in the given container.
Otherwise, a default container is used.
shared_name: An optional `string`. Defaults to `""`.
If non-empty, this reader is named in the given bucket
with this shared_name. Otherwise, the node name is used instead.
name: A name for the operation (optional).
Returns:
A `Tensor` of type mutable `string`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
raise RuntimeError("identity_reader op does not support eager execution. Arg 'reader_handle' is a ref.")
# Add nodes to the TensorFlow graph.
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"IdentityReader", container=container, shared_name=shared_name,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("container", _op.get_attr("container"), "shared_name",
_op.get_attr("shared_name"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"IdentityReader", _inputs_flat, _attrs, _result)
_result, = _result
return _result
IdentityReader = tf_export("raw_ops.IdentityReader")(_ops.to_raw_op(identity_reader))
def identity_reader_eager_fallback(container, shared_name, name, ctx):
raise RuntimeError("identity_reader op does not support eager execution. Arg 'reader_handle' is a ref.")
def identity_reader_v2(container="", shared_name="", name=None):
r"""A Reader that outputs the queued work as both the key and value.
To use, enqueue strings in a Queue. ReaderRead will take the front
work string and output (work, work).
Args:
container: An optional `string`. Defaults to `""`.
If non-empty, this reader is placed in the given container.
Otherwise, a default container is used.
shared_name: An optional `string`. Defaults to `""`.
If non-empty, this reader is named in the given bucket
with this shared_name. Otherwise, the node name is used instead.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `resource`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "IdentityReaderV2", name, "container", container, "shared_name",
shared_name)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return identity_reader_v2_eager_fallback(
container=container, shared_name=shared_name, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"IdentityReaderV2", container=container, shared_name=shared_name,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("container", _op.get_attr("container"), "shared_name",
_op.get_attr("shared_name"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"IdentityReaderV2", _inputs_flat, _attrs, _result)
_result, = _result
return _result
IdentityReaderV2 = tf_export("raw_ops.IdentityReaderV2")(_ops.to_raw_op(identity_reader_v2))
def identity_reader_v2_eager_fallback(container, shared_name, name, ctx):
if container is None:
container = ""
  container = _execute.make_str(container, "container")
# Repository: blacksurgeon/edr -- file: edr/edvehicles.py
import re
import os
import json
from collections import deque
from edtime import EDTime
import edrconfig
import edrhitppoints
import edmodule
import edmodulesinforeader
import edcargoreader
import edrlog
import edcargo
import utils2to3
EDRLOG = edrlog.EDRLog()
class EDVehicleAttitude(object):
def __init__(self):
self.latitude = None
self.longitude = None
self.altitude = None
self.heading = None
def update(self, attitude):
self.latitude = attitude.get("latitude", None)
self.longitude = attitude.get("longitude", None)
self.altitude = attitude.get("altitude", None)
self.heading = attitude.get("heading", None)
def valid(self):
if self.latitude is None or self.longitude is None or self.altitude is None or self.heading is None:
return False
if abs(self.latitude) > 90:
return False
if abs(self.longitude) > 180:
return False
if abs(self.heading) > 360:
return False
return True
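# Illustrative sketch (added, made-up coordinates): valid() only accepts
# attitudes whose fields are all set and within plausible planetary ranges.
#   att = EDVehicleAttitude()
#   att.update({"latitude": 12.3, "longitude": -45.6, "altitude": 1000, "heading": 90})
#   att.valid()  # True; a latitude of 120 or a heading of 400 would return False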
class EDVehicleSize(object):
UNKNOWN = 1
SMALL = 2
MEDIUM = 3
LARGE = 4
class EDVehicle(object):
def __init__(self):
self.type = None
self.size = None
self.name = None
self.id = None
self.identity = None
self.rebuy = None
self._value = None
self.hot = False
now = EDTime.py_epoch_now()
now_ms = EDTime.ms_epoch_now()
config = edrconfig.EDR_CONFIG
self._hull_health = edrhitppoints.EDRHitPPoints(config.hpp_history_max_points(), config.hpp_history_max_span(), config.hpp_trend_span())
self._shield_health = edrhitppoints.EDRHitPPoints(config.hpp_history_max_points(), config.hpp_history_max_span(), config.hpp_trend_span())
self.shield_up = True
self.subsystems = {}
self.timestamp = now
self.fight = {u"value": False, "large": False, u"timestamp": now}
self._hardpoints_deployed = {u"value": False, u"timestamp": now}
self._attacked = {u"value": False, u"timestamp": now}
self.heat_damaged = {u"value": False, u"timestamp": now}
self._in_danger = {u"value": False, u"timestamp": now}
self._low_fuel = {u"value": False, u"timestamp": now}
self.fight_staleness_threshold = config.instance_fight_staleness_threshold()
self.danger_staleness_threshold = config.instance_danger_staleness_threshold()
self.seats = 1
self.fuel_capacity = None
self.fuel_level = None
self.attitude = EDVehicleAttitude()
self.module_info_timestamp = None
self.slots_timestamp = None
self.slots = {}
self.modules = None
self.power_capacity = None
self.cargo_capacity = 0
self.cargo = edcargo.EDCargo()
@property
def hull_health(self):
if self._hull_health.empty():
return None
return self._hull_health.last_value()
def hull_health_stats(self):
return self._hull_health
@hull_health.setter
def hull_health(self, new_value):
self._hull_health.update(new_value)
@property
def value(self):
return self._value
@value.setter
def value(self, new_value):
self._value = new_value
self.rebuy = .1 * new_value
@property
def shield_health(self):
if self._shield_health.empty():
return None
return self._shield_health.last_value()
def shield_health_stats(self):
return self._shield_health
@shield_health.setter
def shield_health(self, new_value):
if new_value == 0:
self.shield_up = False
elif not self.shield_up and new_value >= 90:
self.shield_up = True # highly speculative...
self._shield_health.update(new_value)
@property
def low_fuel(self):
return self._low_fuel["value"]
@low_fuel.setter
def low_fuel(self, low):
before = self._low_fuel["value"]
now = EDTime.py_epoch_now()
self.timestamp = now
self._low_fuel = {"timestamp": now, "value": low}
if before != low and self.fuel_capacity:
if low:
self.fuel_level = min(self.fuel_level, self.fuel_capacity * .25)
else:
self.fuel_level = max(self.fuel_level, self.fuel_capacity * .25)
def json(self, fuel_info=False):
result = {
u"timestamp": int(self.timestamp * 1000),
u"type": self.type,
u"hullHealth": {"timestamp": int(self.timestamp * 1000), "value": 100} if self._hull_health.empty() else self._hull_health.last(),
u"shieldHealth": {"timestamp": int(self.timestamp * 1000), "value": 100} if self._shield_health.empty() else self._shield_health.last(),
u"shieldUp": self.shield_up,
u"keySubsystems": self.__key_subsystems()
}
if fuel_info:
result[u"fuelLevel"] = self.fuel_level
result[u"fuelCapacity"] = self.fuel_capacity
result[u"lowFuel"] = self.low_fuel
return result
# TODO adjust all timestamp to ms?
def __js_t_v(self, t_v):
result = t_v.copy()
result["timestamp"] = int(t_v["timestamp"]*1000)
return result
def __key_subsystems(self):
key_prefixes_lut = {
u"drive_": u"thrusters",
u"hyperdrive_": u"fsd",
u"powerdistributor_": u"power distributor",
u"shieldgenerator_": u"shield generator",
u"powerplant_": u"power plant"
}
key_subsys = {}
for internal_name in self.subsystems:
if not internal_name.startswith(tuple(key_prefixes_lut.keys())):
continue
match = re.search('([a-zA-Z]*_)', internal_name)
if match:
prefix = match.group(1)
canonical_name = key_prefixes_lut[prefix]
key_subsys[canonical_name] = self.subsystems[internal_name].last()
return key_subsys
def __repr__(self):
return str(self.__dict__)
def update_from_loadout(self, event):
other_id = event.get("ShipID", None)
other_type = EDVehicleFactory.canonicalize(event.get("Ship", "unknown"))
if other_id != self.id or other_type != self.type:
EDRLOG.log(u"Mismatch between ID ({} vs {}) and/or Type ({} vs. {}), can't update from loadout".format(self.id, other_id, self.type, other_type), "WARNING")
return
self.identity = event.get('ShipIdent', None)
self.name = event.get('ShipName', None)
self.hull_health = event.get('HullHealth', None) * 100.0 # normalized to 0.0 ... 1.0
if not 'Modules' in event:
return
self.modules = event['Modules']
self.slots = {}
timestamp = EDTime()
self.slots_timestamp = timestamp.from_journal_timestamp(event['timestamp']) if 'timestamp' in event else timestamp
self.module_info_timestamp = self.slots_timestamp # To prevent reading stale data from modulesinfo.json
for module in self.modules:
ed_module = edmodule.EDModule(module)
self.slots[module['Slot']] = ed_module
if module.get("Slot", "").lower() == "powerplant":
self.power_capacity = ed_module.power_generation
health = module['Health'] * 100.0 if 'Health' in module else None
self.subsystem_health(module.get('Item', None), health)
self.cargo_capacity = event.get("CargoCapacity", 0)
self.cargo.update(event)
def update_modules(self):
reader = edmodulesinforeader.EDModulesInfoReader()
modules_info = reader.process()
stale = (self.slots_timestamp is None) or (self.module_info_timestamp and (self.slots_timestamp.as_py_epoch() < self.module_info_timestamp.as_py_epoch()))
if not stale:
EDRLOG.log(u"Modules info: up-to-date", "DEBUG")
return True
if not modules_info or not modules_info.get("Modules", None):
EDRLOG.log(u"No info on modules!", "DEBUG")
return False
timestamp = EDTime()
timestamp.from_journal_timestamp(modules_info['timestamp'])
if self.slots_timestamp and (timestamp.as_py_epoch() < self.slots_timestamp.as_py_epoch() or timestamp.as_py_epoch() < self.module_info_timestamp.as_py_epoch()):
EDRLOG.log(u"Stale info in modulesinfo.json: {} vs. {})".format(timestamp, self.slots_timestamp), "DEBUG")
return False
EDRLOG.log(u"Trying an update of modules: json@{}, slots@{}, panel looked@{}".format(timestamp, self.slots_timestamp, self.module_info_timestamp), "DEBUG")
updated = self.slots_timestamp is None
EDRLOG.log(u"This will be our first time with actual info", "DEBUG")
self.slots_timestamp = timestamp
modules = modules_info.get("Modules", [])
for module in modules:
slot_name = module.get("Slot", None)
if slot_name in self.slots:
module_updated = self.slots[slot_name].update(module)
if self.slots[slot_name].power_draw > 0:
if module_updated:
EDRLOG.log(u"{} in {}: power_draw: {}, priority: {}".format(self.slots[slot_name].cname, slot_name, self.slots[slot_name].power_draw, self.slots[slot_name].priority), "DEBUG")
updated |= module_updated
else:
the_module = edmodule.EDModule(module)
self.slots[slot_name] = the_module
if the_module.power_draw > 0 or the_module.power_generation > 0:
EDRLOG.log(u"[New] {} in {}: power_draw: {}, priority: {}".format(self.slots[slot_name].cname, slot_name, self.slots[slot_name].power_draw, self.slots[slot_name].priority), "DEBUG")
updated |= the_module.power_draw > 0 or the_module.power_generation > 0
return updated
def update_name(self, event):
other_id = event.get("ShipID", None)
other_type = EDVehicleFactory.canonicalize(event.get("Ship", "unknown"))
if other_id != self.id or other_type != self.type:
EDRLOG.log(u"Mismatch between ID ({} vs {}) and/or Type ({} vs. {}), can't update name/identity".format(self.id, other_id, self.type, other_type), "WARNING")
return
self.identity = event.get('UserShipId', None)
self.name = event.get('UserShipName', None)
def update_attitude(self, attitude):
self.attitude.update(attitude)
def update_cargo(self):
reader = edcargoreader.EDCargoReader()
cargo = reader.process()
self.cargo.update(cargo)
def reset(self):
now = EDTime.py_epoch_now()
self.timestamp = now
self.hull_health = 100.0
self.shield_health = 100.0
self.shield_up = True
self.subsystems = {}
self.fight = {u"value": False, u"large": False, u"timestamp": now}
self._hardpoints_deployed = {u"value": False, u"timestamp": now}
self._attacked = {u"value": False, u"timestamp": now}
self.heat_damaged = {u"value": False, u"timestamp": now}
self._in_danger = {u"value": False, u"timestamp": now}
self.modules = None
self.slots = {}
self.slots_timestamp = None
self.module_info_timestamp = None
def destroy(self):
now = EDTime.py_epoch_now()
self.timestamp = now
self.hull_health = 0.0
def cockpit_breached(self):
self.cockpit_health(0.0)
def cockpit_health(self, value):
now = EDTime.py_epoch_now()
self.timestamp = now
cockpit_suffix = "_cockpit"
for internal_name in self.subsystems:
if not internal_name.endswith(cockpit_suffix):
continue
self.subsystem_health(internal_name, value)
break
def taking_hull_damage(self, remaining_health):
now = EDTime.py_epoch_now()
self.timestamp = now
self.hull_health = remaining_health
def taking_heat_damage(self):
now = EDTime.py_epoch_now()
self.timestamp = now
self.heat_damaged = {u"value": True, u"timestamp": now}
def outfit_probably_changed(self, timestamp=None):
edt = EDTime()
if timestamp:
edt.from_journal_timestamp(timestamp)
self.module_info_timestamp = edt
def subsystem_health(self, subsystem, health):
if subsystem is None:
return
canonical = EDVehicleFactory.normalize_module_name(subsystem)
now = EDTime.ms_epoch_now()
self.timestamp = now
if canonical not in self.subsystems:
config = edrconfig.EDR_CONFIG
self.subsystems[canonical] = edrhitppoints.EDRHitPPoints(config.hpp_history_max_points(), config.hpp_history_max_span(), config.hpp_trend_span())
self.subsystems[canonical].update(health)
def subsystem_details(self, subsystem):
if subsystem is None:
return
canonical = EDVehicleFactory.normalize_module_name(subsystem)
if canonical not in self.subsystems:
return
readable_name, short_name = EDVehicleFactory.readable_module_names(subsystem)
return {"name": readable_name, "shortname": short_name, "stats": self.subsystems[canonical]}
def add_subsystem(self, subsystem):
if not subsystem:
return
canonical = EDVehicleFactory.normalize_module_name(subsystem)
now = EDTime.ms_epoch_now()
self.timestamp = now
self.outfit_probably_changed()
config = edrconfig.EDR_CONFIG
self.subsystems[canonical] = edrhitppoints.EDRHitPPoints(config.hpp_history_max_points(), config.hpp_history_max_span(), config.hpp_trend_span())
self.subsystems[canonical].update(None)
def remove_subsystem(self, subsystem):
if subsystem is None:
return
canonical = EDVehicleFactory.normalize_module_name(subsystem)
if canonical.startswith("shieldgenerator_"):
self.shield_health = 0.0
now = EDTime.py_epoch_now()
self.timestamp = now
try:
del self.subsystems[canonical]
self.outfit_probably_changed()
except:
pass
def needs_large_landing_pad(self):
return self.size in [EDVehicleSize.LARGE, EDVehicleSize.UNKNOWN]
def supports_slf(self):
return False
def supports_srv(self):
return True
def supports_crew(self):
return self.seats > 1
def attacked(self):
now = EDTime.py_epoch_now()
self.timestamp = now
self._attacked = {u"value": True, u"timestamp": now}
def under_attack(self):
if self._attacked["value"]:
now = EDTime.py_epoch_now()
return (now >= self._attacked["timestamp"]) and ((now - self._attacked["timestamp"]) <= self.danger_staleness_threshold)
return False
def safe(self):
now = EDTime.py_epoch_now()
self._attacked = {u"value": False, u"timestamp": now}
self.fight = {u"value": False, "large": False, u"timestamp": now}
self._in_danger = {u"value": False, u"timestamp": now}
def unsafe(self):
now = EDTime.py_epoch_now()
self._in_danger = {u"value": True, u"timestamp": now}
def in_danger(self):
if self._in_danger["value"]:
now = EDTime.py_epoch_now()
return (now >= self._in_danger["timestamp"]) and ((now - self._in_danger["timestamp"]) <= self.danger_staleness_threshold)
return False
def hardpoints(self, deployed):
self._hardpoints_deployed = {u"value": deployed, u"timestamp": EDTime.py_epoch_now()}
def hardpoints_deployed(self):
if self._hardpoints_deployed["value"]:
now = EDTime.py_epoch_now()
return (now >= self._hardpoints_deployed["timestamp"]) and ((now - self._hardpoints_deployed["timestamp"]) <= self.fight_staleness_threshold)
return False
def shield_state(self, is_up):
if not is_up:
self.shield_health = 0.0
self.shield_up = is_up
def skirmish(self):
now = EDTime.py_epoch_now()
self.fight = {u"value": True, "large": False, u"timestamp": now}
def battle(self):
now = EDTime.py_epoch_now()
self.fight = {u"value": True, "large": True, | |
#!/usr/bin/python3
__version__ = '0.0.24' # Time-stamp: <2021-10-16T01:27:09Z>
## Language: Japanese/UTF-8
"""結婚・不倫・扶養・相続などのマッチングのシミュレーション"""
##
## License:
##
## Public Domain
## (Since this small code is close to be mathematically trivial.)
##
## Author:
##
## JRF
## http://jrf.cocolog-nifty.com/software/
## (The page is written in Japanese.)
##
#import timeit
from collections import OrderedDict
import itertools
import math
import random
import numpy as np
import matplotlib.pyplot as plt
import pickle
import sys
import signal
import argparse
ARGS = argparse.Namespace()
base = argparse.Namespace() # Pseudo Module
def calc_increase_rate (terms, intended):
return 1 - math.exp(math.log(1 - intended) / terms)
def calc_pregnant_mag (r, rworst):
return math.log(rworst / r) / math.log(0.1)
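# Note (added): calc_increase_rate(terms, intended) solves
#   (1 - r) ** terms == 1 - intended
# for the per-term rate r, so that applying r every term accumulates to the
# intended probability over `terms` terms; e.g. calc_increase_rate(12, 0.5)
# is roughly 0.0561, the monthly rate that reaches 50% over a year.
# calc_pregnant_mag(r, rworst) returns the exponent m with r * 0.1 ** m == rworst,
# i.e. the curve r * fertility ** m equals r at fertility 1 and rworst at fertility 0.1.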
ARGS.load = False
ARGS.save = False
# File name to save to
ARGS.pickle = 'test_of_matching_2.pickle'
# Interval between saves, as a precaution against errors part way through
ARGS.save_period = 120
# True to not reload the random seed when loading
ARGS.change_random_seed = False
# Launch the debugger on errors
ARGS.debug_on_error = False
# Term at which to launch the debugger
ARGS.debug_term = None
# Number of trials
ARGS.trials = 50
# Length of the randomly chosen part of an ID
ARGS.id_random_length = 10
# Upper limit on tries when choosing an ID at random
ARGS.id_try = 1000
# True to not display the View
ARGS.no_view = False
# Number of bins for the View histograms
ARGS.bins = 100
# View
ARGS.view_1 = 'population'
ARGS.view_2 = 'children'
ARGS.view_3 = 'married'
ARGS.view_4 = 'pregnancy'
# Population of each district
#ARGS.population = [10, 10, 5]
ARGS.population = [10000, 10000, 5000]
# Minimum number of newborn births
ARGS.min_birth = None
# Update interval of the economy
ARGS.economy_period = 12
# Peasant ratio = peasants / (peasants + merchants)
ARGS.peasant_ratio = 68.0/(68.0 + 20.0)
# Land price
ARGS.prop_value_of_land = 10.0
# sigma that determines the initial commercial property
ARGS.init_prop_sigma = 100.0
# r and theta that determine the initial land ownership
ARGS.land_r = 1.5
ARGS.land_theta = 0.2
# Maximum yearly increase for the largest land holder
ARGS.land_max_growth = 5
# At initialization, assume that nobody owns land
ARGS.no_land = False
# At initialization, set commercial property to 0.
ARGS.init_zero = False
# Maximum age at initialization.
ARGS.init_max_age = 100.0
# Ratio of adultery
#ARGS.adultery_rate = 0.11
ARGS.adultery_rate = 0.20
# Ratio of adultery including newly started adulteries
#ARGS.new_adultery_rate = 0.22
ARGS.new_adultery_rate = 0.22
# Extra weight that makes only new adulteries easier to end
ARGS.new_adultery_reduce = 0.6
# Multiplier for how easily an adultery breaks up
ARGS.adultery_separability_mag = 2.0
# Probability that an adultery partner is from outside the district, male/female
ARGS.external_adultery_rate_male = 0.3
ARGS.external_adultery_rate_female = 0.1
# Ratio of married people
#ARGS.marriage_rate = 0.7
ARGS.marriage_rate = 0.768
# Ratio of marriage including newly married people
#ARGS.new_marriage_rate = 0.8
ARGS.new_marriage_rate = 0.77
# Upper limit on the ratio of newly married people
#ARGS.marriage_max_increase_rate = 0.1
ARGS.marriage_max_increase_rate = 0.05
# Lower bound on favor required for marriage
ARGS.marriage_favor_threshold = 2.0
# Multiplier for how easily a marriage breaks up
ARGS.marriage_separability_mag = 2.0
# Probability that a marriage partner is from outside the district, male/female
ARGS.external_marriage_rate_male = 0.3
ARGS.external_marriage_rate_female = 0.1
# Natural divorce rate
ARGS.with_hate_natural_divorce_rate = calc_increase_rate(10 * 12, 10/100)
ARGS.natural_divorce_rate = calc_increase_rate(30 * 12, 5/100)
# System-wide multiplier applied to the number of children wanted
ARGS.want_child_mag = 1.0
# Increment of that multiplier when "abortions" are frequent
ARGS.want_child_mag_increase = 0.02
# Miscarriage probability
ARGS.miscarriage_rate = calc_increase_rate(10, 20/100)
# Newborn death rate
ARGS.newborn_death_rate = 5/100
# Death rate of women who have given birth (multipara)
ARGS.multipara_death_rate = 1.5/100
# Probability of becoming infertile after a pregnancy
ARGS.infertility_rate = calc_increase_rate(12, 10/100)
# General death rate
ARGS.general_death_rate = calc_increase_rate(12, 0.5/100)
# Death rate of the elderly from age 60 to 80
ARGS.a60_death_rate = calc_increase_rate((80 - 60) * 12, 70/100)
# Death rate of the elderly from age 80 to 110
ARGS.a80_death_rate = calc_increase_rate((110 - 80) * 12, 99/100)
# Death rate of infants from age 0 to 3
ARGS.infant_death_rate = calc_increase_rate(3 * 12, 5/100)
# Probability of an intended pregnancy when fertility is 1
ARGS.intended_pregnant_rate = calc_increase_rate(12, 50/100)
#ARGS.intended_pregnant_rate = calc_increase_rate(12, 66/100)
ARGS.intended_pregnant_mag = None
# Probability of an unintended pregnancy when fertility is 1
ARGS.unintended_pregnant_rate = calc_increase_rate(12, 10/100)
#ARGS.unintended_pregnant_rate = calc_increase_rate(12, 30/100)
ARGS.unintended_pregnant_mag = None
# Probability of pregnancy when fertility is 0.1
#ARGS.worst_pregnant_rate = calc_increase_rate(12 * 10, 10/100)
#ARGS.worst_pregnant_rate = calc_increase_rate(12, 5/100)
ARGS.worst_pregnant_rate = calc_increase_rate(12, 1/100)
# Probability of pregnancy from a casual new adultery when fertility is 1
ARGS.new_adulteries_pregnant_rate = (ARGS.intended_pregnant_rate + ARGS.unintended_pregnant_rate) / 2
ARGS.new_adulteries_pregnant_mag = None
# Parameters for the decline of fertility in men aged 40 and over
ARGS.male_fertility_reduce_rate = calc_increase_rate(12, 0.1)
ARGS.male_fertility_reduce = 0.9
# Rate of resuming adultery while married or already in an adultery
ARGS.with_spouse_adultery_reboot_rate = calc_increase_rate(12 * 10, 10/100)
# Rate of resuming adultery while neither married nor in an adultery
ARGS.adultery_reboot_rate = calc_increase_rate(12, 10/100)
# Probability that an adultery is promoted to marriage when there are children
ARGS.with_child_adultery_elevate_rate = calc_increase_rate(12, 20/100)
# Probability that an adultery is promoted to marriage up to age 24
ARGS.a24_adultery_elevate_rate = calc_increase_rate(12, 20/100)
# Probability that an adultery is promoted to marriage
ARGS.adultery_elevate_rate = calc_increase_rate(12, 5/100)
# Maximum probability that people aged 15 to 18 leave dependency early
ARGS.become_adult_rate = calc_increase_rate(12 * 3, 50/100)
# Probability that an elderly person aged 70 to 90 enters dependency
ARGS.support_aged_rate = calc_increase_rate(12 * 10, 90/100)
# Probability that a person without parents takes an elderly person into their care
ARGS.guard_aged_rate = calc_increase_rate(12 * 10, 90/100)
# Probability that a family with many children puts a child up for adoption
ARGS.unsupport_unwanted_rate = calc_increase_rate(12 * 10, 50/100)
# Probability that a family with few children volunteers to adopt
#ARGS.support_unwanted_rate = calc_increase_rate(12 * 10, 50/100)
ARGS.support_unwanted_rate = 0.1
SAVED_ECONOMY = None
DEBUG_NEXT_TERM = False
def parse_args (view_options=['none']):
global SAVED_ECONOMY
parser = argparse.ArgumentParser()
parser.add_argument("-L", "--load", action="store_true")
parser.add_argument("-L-", "--no-load", action="store_false", dest="load")
parser.add_argument("-S", "--save", action="store_true")
parser.add_argument("-S-", "--no-save", action="store_false", dest="save")
parser.add_argument("-d", "--debug-on-error", action="store_true")
parser.add_argument("-d-", "--no-debug-on-error", action="store_false",
dest="debug_on_error")
parser.add_argument("--debug-term", type=int)
parser.add_argument("-t", "--trials", type=int)
parser.add_argument("-p", "--population", type=str)
parser.add_argument("--min-birth", type=float)
parser.add_argument("--view-1", choices=view_options)
parser.add_argument("--view-2", choices=view_options)
parser.add_argument("--view-3", choices=view_options)
parser.add_argument("--view-4", choices=view_options)
specials = set(['load', 'save', 'debug_on_error', 'debug_term',
'trials', 'population', 'min_birth',
'view_1', 'view_2', 'view_3', 'view_4'])
for p, v in vars(ARGS).items():
if p not in specials:
p2 = '--' + p.replace('_', '-')
np2 = '--no-' + p.replace('_', '-')
if np2.startswith('--no-no-'):
np2 = np2.replace('--no-no-', '--with-', 1)
if v is False or v is True:
parser.add_argument(p2, action="store_true")
parser.add_argument(np2, action="store_false", dest=p)
elif v is None:
parser.add_argument(p2, type=float)
else:
parser.add_argument(p2, type=type(v))
parser.parse_args(namespace=ARGS)
if ARGS.load:
print("Loading...\n", flush=True)
with open(ARGS.pickle, 'rb') as f:
args, SAVED_ECONOMY = pickle.load(f)
vars(ARGS).update(vars(args))
ARGS.save = False
parser.parse_args(namespace=ARGS)
if type(ARGS.population) is str:
ARGS.population = list(map(int, ARGS.population.split(',')))
if ARGS.min_birth is None:
ARGS.min_birth = sum([x / (12 * ARGS.init_max_age) for x in ARGS.population])
if ARGS.intended_pregnant_mag is None:
ARGS.intended_pregnant_mag = calc_pregnant_mag(
ARGS.intended_pregnant_rate, ARGS.worst_pregnant_rate
)
if ARGS.unintended_pregnant_mag is None:
ARGS.unintended_pregnant_mag = calc_pregnant_mag(
ARGS.unintended_pregnant_rate, ARGS.worst_pregnant_rate
)
if ARGS.new_adulteries_pregnant_mag is None:
ARGS.new_adulteries_pregnant_mag = calc_pregnant_mag(
ARGS.new_adulteries_pregnant_rate, ARGS.worst_pregnant_rate
)
## class 'Frozen' from:
## 《How to freeze Python classes « Python recipes « ActiveState Code》
## https://code.activestate.com/recipes/252158-how-to-freeze-python-classes/
def frozen (set):
"""Raise an error when trying to set an undeclared name, or when calling
from a method other than Frozen.__init__ or the __init__ method of
a class derived from Frozen"""
def set_attr (self,name,value):
import sys
if hasattr(self,name):
#If attribute already exists, simply set it
set(self,name,value)
return
elif sys._getframe(1).f_code.co_name == '__init__':
#Allow __setattr__ calls in __init__ calls of proper object types
for k,v in sys._getframe(1).f_locals.items():
if k=="self" and isinstance(v, self.__class__):
set(self,name,value)
return
raise AttributeError("You cannot add an attribute '%s' to %s"
% (name, self))
return set_attr
class Frozen (object):
"""Subclasses of Frozen are frozen, i.e. it is impossibile to add
new attributes to them and their instances."""
__setattr__=frozen(object.__setattr__)
class __metaclass__ (type):
__setattr__=frozen(type.__setattr__)
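# Usage sketch (added): subclasses may only create attributes inside __init__;
# any later attempt to add a new attribute raises AttributeError.
#   class Point (Frozen):
#       def __init__ (self):
#           self.x = 0
#   p = Point()
#   p.x = 1   # fine, attribute already exists
#   p.y = 2   # AttributeError: you cannot add an attribute 'y'
# Note that the __metaclass__ hook above is a Python 2 idiom and has no effect
# when this file is run under Python 3.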
class Serializable (Frozen):
def __str__ (self, excluding=None):
r = []
def f (self, excluding):
if id(self) in excluding:
return "..."
elif isinstance(self, Serializable):
return self.__str__(excluding=excluding)
else:
return str(self)
for p, v in self.__dict__.items():
if excluding is None:
excluding = set()
excluding.add(id(self))
if isinstance(v, list):
r.append(str(p) + ": ["
+ ', '.join(map(lambda x: f(x, excluding), v))
+ "]")
else:
r.append(str(p) + ": " + f(v, excluding))
return '(' + ', '.join(r) + ')'
class IDGenerator (Frozen):
def __init__ (self):
self.pool = {}
def generate (self, prefix):
for i in range(ARGS.id_try):
n = prefix + \
format(random.randrange(0, 16 ** ARGS.id_random_length),
'0' + str(ARGS.id_random_length) + 'x')
if n not in self.pool:
self.pool[n] = True
return n
raise ValueError('Too many tries of ID generation.')
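# Hypothetical usage of IDGenerator (added): generated IDs are the caller-supplied
# prefix followed by ARGS.id_random_length random hex digits, e.g.
#   gen = IDGenerator()
#   gen.generate('P')   # -> something like 'P3fa92c01bd'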
class Person0 (Serializable):
def __init__ (self):
        self.id = None         # ID or name
        self.economy = None    # back reference to the economy this person belongs to
        self.sex = None        # 'M'ale or 'F'emale
        self.birth_term = None # term of birth
        self.age = None        # age
        self.district = None   # district of residence
        self.death = None      # death
        self.prop = 0          # commercial property
        self.land = 0          # agricultural property (land)
        self.consumption = 0   # consumption amount
        self.ambition = 0      # ambition
        self.education = 0     # education level
        self.trash = []        # relationships that have ended
        self.adult_success = 0 # number of successful adulteries
        self.marriage = None   # marriage
        self.married = False   # True if ever married
        self.a60_spouse_death = False # True if the spouse died after age 60
        self.adulteries = []   # adulteries
        self.fertility = 0     # ease of getting pregnant, or reproductive ability
        self.pregnancy = None  # pregnancy (None if not pregnant or male)
        self.pregnancy_wait = None # pregnancy grace period
        self.marriage_wait = None  # marriage grace period
        self.children = []     # children (including adopted ones)
        self.father = ''       # foster father
        self.mother = ''       # foster mother
        self.initial_father = '' # person regarded as the biological father
        self.initial_mother = '' # person regarded as the biological mother
        self.biological_father = '' # biological father
        self.biological_mother = '' # biological mother
        self.want_child_base = 2  # base number of children wanted
        self.supporting = []   # IDs of family members being supported (dependents)
        self.supported = None  # ID of the person supporting this one
        self.cum_donation = 0  # cumulative donation
        self.hating = {}       # grudges
        self.hating_unknown = 0 # grudge whose target cannot be identified
        self.political_hating = 0 # political grudge
        self.tmp_luck = None   # luck
        self.tmp_score = None  # score
        self.tmp_asset_rank = None # asset rank / total population
def __str__ (self, excluding=None):
if excluding is None:
excluding = set()
if id(self.economy) not in excluding:
excluding.add(id(self.economy))
return super().__str__(excluding=excluding)
class PersonEC (Person0):
def asset_value (self):
return self.prop + self.land * ARGS.prop_value_of_land
def trained_ambition (self):
if self.ambition > 0.5:
return (1 - 0.2 * self.education) * self.ambition
else:
return 1 - (1 - 0.2 * self.education) * (1 - self.ambition)
def relative_spouse_asset (self, relation):
p = self
economy = self.economy
if relation.spouse == '':
return relation.tmp_relative_spouse_asset
elif not economy.is_living(relation.spouse):
return 1.0
else:
s = economy.people[relation.spouse]
return s.asset_value() / p.asset_value()
def change_district (self, new_district):
        # Handling of selling or buying land might be needed here.
self.district = new_district
class PersonBT (Person0):
def children_wanting (self):
p = self
economy = self.economy
x = p.tmp_asset_rank
if x < 0.5:
y = ((1/6 - 1/4) / (0 - 0.5)) * (x - 0.5) + 1/4
else:
y = ((1 - 1/4) / (1 - 0.5)) * (x - 0.5) + 1/4
return np_clip(y * p.want_child_base * economy.want_child_mag
* ARGS.want_child_mag, 1, 12)
def want_child (self, rel):
p = self
economy = self.economy
ch = 0
t = []
if isinstance(rel, Marriage):
if rel.spouse == '' or not economy.is_living(rel.spouse):
return p.children_wanting() > len(p.children)
else:
s = economy.people[rel.spouse]
return (p.children_wanting() + s.children_wanting()) / 2 \
> len(p.children)
elif isinstance(rel, Adultery):
if rel.spouse == '' or not economy.is_living(rel.spouse):
return p.adultery_want_child() > 0
else:
s = economy.people[rel.spouse]
return p.adultery_want_child() > 0 \
and s.adultery_want_child() > 0
def adultery_want_child (self):
p = self
economy = self.economy
w = p.children_wanting()
ch = 0
        t = []
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import subprocess
import tempfile
import time
import unittest
import uuid
from io import BytesIO
import pytest
import requests
from huggingface_hub.commands.user import _login
from huggingface_hub.constants import (
REPO_TYPE_DATASET,
REPO_TYPE_SPACE,
SPACES_SDK_TYPES,
)
from huggingface_hub.file_download import cached_download, hf_hub_download
from huggingface_hub.hf_api import (
USERNAME_PLACEHOLDER,
DatasetInfo,
HfApi,
HfFolder,
MetricInfo,
ModelInfo,
erase_from_credential_store,
read_from_credential_store,
repo_type_and_id_from_hf_id,
)
from requests.exceptions import HTTPError
from .testing_constants import (
ENDPOINT_STAGING,
ENDPOINT_STAGING_BASIC_AUTH,
FULL_NAME,
PASS,
TOKEN,
USER,
)
from .testing_utils import (
DUMMY_DATASET_ID,
DUMMY_DATASET_ID_REVISION_ONE_SPECIFIC_COMMIT,
DUMMY_MODEL_ID,
DUMMY_MODEL_ID_REVISION_ONE_SPECIFIC_COMMIT,
require_git_lfs,
set_write_permission_and_retry,
with_production_testing,
)
def repo_name(id=uuid.uuid4().hex[:6]):
return "my-model-{0}-{1}".format(id, int(time.time() * 10e3))
def repo_name_large_file(id=uuid.uuid4().hex[:6]):
return "my-model-largefiles-{0}-{1}".format(id, int(time.time() * 10e3))
def dataset_repo_name(id=uuid.uuid4().hex[:6]):
return "my-dataset-{0}-{1}".format(id, int(time.time() * 10e3))
def space_repo_name(id=uuid.uuid4().hex[:6]):
return "my-space-{0}-{1}".format(id, int(time.time() * 10e3))
WORKING_REPO_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "fixtures/working_repo"
)
LARGE_FILE_14MB = "https://cdn-media.huggingface.co/lfs-largefiles/progit.epub"
LARGE_FILE_18MB = "https://cdn-media.huggingface.co/lfs-largefiles/progit.pdf"
class HfApiCommonTest(unittest.TestCase):
_api = HfApi(endpoint=ENDPOINT_STAGING)
class HfApiLoginTest(HfApiCommonTest):
def setUp(self) -> None:
erase_from_credential_store(USER)
@classmethod
def tearDownClass(cls) -> None:
cls._api.login(username=USER, password=<PASSWORD>)
def test_login_invalid(self):
with self.assertRaises(HTTPError):
self._api.login(username=USER, password="<PASSWORD>")
def test_login_valid(self):
token = self._api.login(username=USER, password=<PASSWORD>)
self.assertIsInstance(token, str)
def test_login_git_credentials(self):
self.assertTupleEqual(read_from_credential_store(USER), (None, None))
self._api.login(username=USER, password=<PASSWORD>)
self.assertTupleEqual(read_from_credential_store(USER), (USER.lower(), PASS))
erase_from_credential_store(username=USER)
self.assertTupleEqual(read_from_credential_store(USER), (None, None))
def test_login_cli(self):
_login(self._api, username=USER, password=<PASSWORD>)
self.assertTupleEqual(read_from_credential_store(USER), (USER.lower(), PASS))
erase_from_credential_store(username=USER)
self.assertTupleEqual(read_from_credential_store(USER), (None, None))
_login(self._api, token=TOKEN)
self.assertTupleEqual(
read_from_credential_store(USERNAME_PLACEHOLDER),
(USERNAME_PLACEHOLDER, TOKEN),
)
erase_from_credential_store(username=USERNAME_PLACEHOLDER)
self.assertTupleEqual(
read_from_credential_store(USERNAME_PLACEHOLDER), (None, None)
)
class HfApiCommonTestWithLogin(HfApiCommonTest):
@classmethod
def setUpClass(cls):
"""
Share this valid token in all tests below.
"""
cls._token = cls._api.login(username=USER, password=<PASSWORD>)
class HfApiEndpointsTest(HfApiCommonTestWithLogin):
def test_whoami(self):
info = self._api.whoami(token=self._token)
self.assertEqual(info["name"], USER)
self.assertEqual(info["fullname"], FULL_NAME)
self.assertIsInstance(info["orgs"], list)
self.assertIsInstance(info["orgs"][0]["apiToken"], str)
def test_create_update_and_delete_repo(self):
REPO_NAME = repo_name("crud")
self._api.create_repo(name=REPO_NAME, token=self._token)
res = self._api.update_repo_visibility(
name=REPO_NAME, token=self._token, private=True
)
self.assertTrue(res["private"])
res = self._api.update_repo_visibility(
name=REPO_NAME, token=self._token, private=False
)
self.assertFalse(res["private"])
self._api.delete_repo(name=REPO_NAME, token=self._token)
def test_create_update_and_delete_dataset_repo(self):
DATASET_REPO_NAME = dataset_repo_name("crud")
self._api.create_repo(
name=DATASET_REPO_NAME, token=self._token, repo_type=REPO_TYPE_DATASET
)
res = self._api.update_repo_visibility(
name=DATASET_REPO_NAME,
token=self._token,
private=True,
repo_type=REPO_TYPE_DATASET,
)
self.assertTrue(res["private"])
res = self._api.update_repo_visibility(
name=DATASET_REPO_NAME,
token=self._token,
private=False,
repo_type=REPO_TYPE_DATASET,
)
self.assertFalse(res["private"])
self._api.delete_repo(
name=DATASET_REPO_NAME, token=self._token, repo_type=REPO_TYPE_DATASET
)
def test_create_update_and_delete_space_repo(self):
SPACE_REPO_NAME = space_repo_name("failing")
with pytest.raises(ValueError, match=r"No space_sdk provided.*"):
self._api.create_repo(
token=self._token,
name=SPACE_REPO_NAME,
repo_type=REPO_TYPE_SPACE,
space_sdk=None,
)
with pytest.raises(ValueError, match=r"Invalid space_sdk.*"):
self._api.create_repo(
token=self._token,
name=SPACE_REPO_NAME,
repo_type=REPO_TYPE_SPACE,
space_sdk="asdfasdf",
)
for sdk in SPACES_SDK_TYPES:
SPACE_REPO_NAME = space_repo_name(sdk)
self._api.create_repo(
name=SPACE_REPO_NAME,
token=self._token,
repo_type=REPO_TYPE_SPACE,
space_sdk=sdk,
)
res = self._api.update_repo_visibility(
name=SPACE_REPO_NAME,
token=self._token,
private=True,
repo_type=REPO_TYPE_SPACE,
)
self.assertTrue(res["private"])
res = self._api.update_repo_visibility(
name=SPACE_REPO_NAME,
token=self._token,
private=False,
repo_type=REPO_TYPE_SPACE,
)
self.assertFalse(res["private"])
self._api.delete_repo(
name=SPACE_REPO_NAME, token=self._token, repo_type=REPO_TYPE_SPACE
)
class HfApiUploadFileTest(HfApiCommonTestWithLogin):
def setUp(self) -> None:
super().setUp()
self.tmp_dir = tempfile.mkdtemp()
self.tmp_file = os.path.join(self.tmp_dir, "temp")
self.tmp_file_content = "Content of the file"
with open(self.tmp_file, "w+") as f:
f.write(self.tmp_file_content)
self.addCleanup(
lambda: shutil.rmtree(self.tmp_dir, onerror=set_write_permission_and_retry)
)
def test_upload_file_validation(self):
REPO_NAME = repo_name("upload")
with self.assertRaises(ValueError, msg="Wrong repo type"):
self._api.upload_file(
path_or_fileobj=self.tmp_file,
path_in_repo="README.md",
repo_id=f"{USER}/{REPO_NAME}",
repo_type="this type does not exist",
token=self._token,
)
with self.assertRaises(ValueError, msg="File opened in text mode"):
with open(self.tmp_file, "rt") as ftext:
self._api.upload_file(
path_or_fileobj=ftext,
path_in_repo="README.md",
repo_id=f"{USER}/{REPO_NAME}",
token=self._token,
)
with self.assertRaises(
ValueError, msg="path_or_fileobj is str but does not point to a file"
):
self._api.upload_file(
path_or_fileobj=os.path.join(self.tmp_dir, "nofile.pth"),
path_in_repo="README.md",
repo_id=f"{USER}/{REPO_NAME}",
token=self._token,
)
for (invalid_path, msg) in [
("Remote\\README.md", "Has a backslash"),
("/Remote/README.md", "Starts with a slash"),
("Remote/../subtree/./README.md", "Has relative parts"),
]:
with self.subTest(msg=msg):
with self.assertRaises(ValueError, msg="path_in_repo is invalid"):
self._api.upload_file(
path_or_fileobj=self.tmp_file,
path_in_repo=invalid_path,
repo_id=f"{USER}/{REPO_NAME}",
token=self._token,
)
def test_upload_file_path(self):
REPO_NAME = repo_name("path")
self._api.create_repo(token=self._token, name=REPO_NAME)
try:
self._api.upload_file(
path_or_fileobj=self.tmp_file,
path_in_repo="temp/new_file.md",
repo_id=f"{USER}/{REPO_NAME}",
token=self._token,
)
url = "{}/{user}/{repo}/resolve/main/temp/new_file.md".format(
ENDPOINT_STAGING,
user=USER,
repo=REPO_NAME,
)
filepath = cached_download(url, force_download=True)
with open(filepath) as downloaded_file:
content = downloaded_file.read()
self.assertEqual(content, self.tmp_file_content)
except Exception as err:
self.fail(err)
finally:
self._api.delete_repo(name=REPO_NAME, token=self._token)
def test_upload_file_fileobj(self):
REPO_NAME = repo_name("fileobj")
self._api.create_repo(name=REPO_NAME, token=self._token)
try:
with open(self.tmp_file, "rb") as filestream:
self._api.upload_file(
path_or_fileobj=filestream,
path_in_repo="temp/new_file.md",
repo_id=f"{USER}/{REPO_NAME}",
token=self._token,
)
url = "{}/{user}/{repo}/resolve/main/temp/new_file.md".format(
ENDPOINT_STAGING,
user=USER,
repo=REPO_NAME,
)
filepath = cached_download(url, force_download=True)
with open(filepath) as downloaded_file:
content = downloaded_file.read()
self.assertEqual(content, self.tmp_file_content)
except Exception as err:
self.fail(err)
finally:
self._api.delete_repo(name=REPO_NAME, token=self._token)
def test_upload_file_bytesio(self):
REPO_NAME = repo_name("bytesio")
self._api.create_repo(name=REPO_NAME, token=self._token)
try:
filecontent = BytesIO(b"File content, but in bytes IO")
self._api.upload_file(
path_or_fileobj=filecontent,
path_in_repo="temp/new_file.md",
repo_id=f"{USER}/{REPO_NAME}",
token=self._token,
)
url = "{}/{user}/{repo}/resolve/main/temp/new_file.md".format(
ENDPOINT_STAGING,
user=USER,
repo=REPO_NAME,
)
filepath = cached_download(url, force_download=True)
with open(filepath) as downloaded_file:
content = downloaded_file.read()
self.assertEqual(content, filecontent.getvalue().decode())
except Exception as err:
self.fail(err)
finally:
self._api.delete_repo(name=REPO_NAME, token=self._token)
def test_upload_file_conflict(self):
REPO_NAME = repo_name("conflict")
self._api.create_repo(name=REPO_NAME, token=self._token)
try:
filecontent = BytesIO(b"File content, but in bytes IO")
self._api.upload_file(
path_or_fileobj=filecontent,
path_in_repo="temp/new_file.md",
repo_id=f"{USER}/{REPO_NAME}",
token=self._token,
identical_ok=True,
)
# No exception raised when identical_ok is True
self._api.upload_file(
path_or_fileobj=filecontent,
path_in_repo="temp/new_file.md",
repo_id=f"{USER}/{REPO_NAME}",
token=self._token,
identical_ok=True,
)
with self.assertRaises(HTTPError) as err_ctx:
self._api.upload_file(
path_or_fileobj=filecontent,
path_in_repo="temp/new_file.md",
repo_id=f"{USER}/{REPO_NAME}",
token=self._token,
identical_ok=False,
)
self.assertEqual(err_ctx.exception.response.status_code, 409)
except Exception as err:
self.fail(err)
finally:
self._api.delete_repo(name=REPO_NAME, token=self._token)
def test_upload_buffer(self):
REPO_NAME = repo_name("buffer")
self._api.create_repo(name=REPO_NAME, token=self._token)
try:
buffer = BytesIO()
buffer.write(self.tmp_file_content.encode())
self._api.upload_file(
path_or_fileobj=buffer.getvalue(),
path_in_repo="temp/new_file.md",
repo_id=f"{USER}/{REPO_NAME}",
token=self._token,
)
url = "{}/{user}/{repo}/resolve/main/temp/new_file.md".format(
ENDPOINT_STAGING,
user=USER,
repo=REPO_NAME,
)
filepath = cached_download(url, force_download=True)
with open(filepath) as downloaded_file:
content = downloaded_file.read()
self.assertEqual(content, self.tmp_file_content)
except Exception as err:
self.fail(err)
finally:
self._api.delete_repo(name=REPO_NAME, token=self._token)
def test_delete_file(self):
REPO_NAME = repo_name("delete")
self._api.create_repo(token=self._token, name=REPO_NAME)
try:
self._api.upload_file(
path_or_fileobj=self.tmp_file,
path_in_repo="temp/new_file.md",
repo_id=f"{USER}/{REPO_NAME}",
token=self._token,
)
self._api.delete_file(
path_in_repo="temp/new_file.md",
repo_id=f"{USER}/{REPO_NAME}",
token=self._token,
)
with self.assertRaises(HTTPError):
# Should raise a 404
hf_hub_download(f"{USER}/{REPO_NAME}", "temp/new_file.md")
except Exception as err:
self.fail(err)
finally:
self._api.delete_repo(name=REPO_NAME, token=self._token)
def test_get_full_repo_name(self):
repo_name_with_no_org = self._api.get_full_repo_name("model", token=self._token)
self.assertEqual(repo_name_with_no_org, f"{USER}/model")
repo_name_with_no_org = self._api.get_full_repo_name(
"model", organization="org", token=self._token
)
self.assertEqual(repo_name_with_no_org, "org/model")
class HfApiPublicTest(unittest.TestCase):
def test_staging_list_models(self):
_api = HfApi(endpoint=ENDPOINT_STAGING)
_ = _api.list_models()
@with_production_testing
def test_list_models(self):
_api = HfApi()
models = _api.list_models()
self.assertGreater(len(models), 100)
self.assertIsInstance(models[0], ModelInfo)
@with_production_testing
def test_list_models_complex_query(self):
# Let's list the 10 most recent models
# with tags "bert" and "jax",
# ordered by last modified date.
_api = HfApi()
models = _api.list_models(
filter=("bert", "jax"), sort="lastModified", direction=-1, limit=10
)
        # we have at least 1 model
self.assertGreater(len(models), 1)
self.assertLessEqual(len(models), 10)
model = models[0]
self.assertIsInstance(model, ModelInfo)
self.assertTrue(all(tag in model.tags for tag in ["bert", "jax"]))
@with_production_testing
def test_list_models_with_config(self):
_api = HfApi()
models = _api.list_models(
filter="adapter-transformers", fetch_config=True, limit=20
)
found_configs = 0
for model in models:
if model.config:
found_configs = found_configs + 1
self.assertGreater(found_configs, 0)
@with_production_testing
def test_model_info(self):
_api = HfApi()
model = _api.model_info(repo_id=DUMMY_MODEL_ID)
self.assertIsInstance(model, ModelInfo)
self.assertNotEqual(model.sha, DUMMY_MODEL_ID_REVISION_ONE_SPECIFIC_COMMIT)
# One particular commit (not the top of `main`)
model = _api.model_info(
repo_id=DUMMY_MODEL_ID, revision=DUMMY_MODEL_ID_REVISION_ONE_SPECIFIC_COMMIT
)
self.assertIsInstance(model, ModelInfo)
self.assertEqual(model.sha, DUMMY_MODEL_ID_REVISION_ONE_SPECIFIC_COMMIT)
@with_production_testing
def test_list_repo_files(self):
_api = HfApi()
files = _api.list_repo_files(repo_id=DUMMY_MODEL_ID)
expected_files = [
".gitattributes",
"README.md",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"tf_model.h5",
"vocab.json",
]
self.assertListEqual(files, expected_files)
def test_staging_list_datasets(self):
_api = HfApi(endpoint=ENDPOINT_STAGING)
_ = _api.list_datasets()
@with_production_testing
def test_list_datasets(self):
_api = HfApi()
datasets = _api.list_datasets()
self.assertGreater(len(datasets), 100)
self.assertIsInstance(datasets[0], DatasetInfo)
@with_production_testing
def test_list_datasets_full(self):
_api = HfApi()
datasets = _api.list_datasets(full=True)
self.assertGreater(len(datasets), 100)
dataset = datasets[0]
self.assertIsInstance(dataset, DatasetInfo)
self.assertTrue(any(dataset.card_data for dataset in datasets))
@with_production_testing
def test_dataset_info(self):
_api = HfApi()
dataset = _api.dataset_info(repo_id=DUMMY_DATASET_ID)
self.assertTrue(
isinstance(dataset.card_data, dict) and len(dataset.card_data) > 0
)
self.assertTrue(
isinstance(dataset.siblings, list) and len(dataset.siblings) > 0
)
self.assertIsInstance(dataset, DatasetInfo)
self.assertNotEqual(dataset.sha, DUMMY_DATASET_ID_REVISION_ONE_SPECIFIC_COMMIT)
dataset = _api.dataset_info(
repo_id=DUMMY_DATASET_ID,
revision=DUMMY_DATASET_ID_REVISION_ONE_SPECIFIC_COMMIT,
)
self.assertIsInstance(dataset, DatasetInfo)
self.assertEqual(dataset.sha, DUMMY_DATASET_ID_REVISION_ONE_SPECIFIC_COMMIT)
def test_staging_list_metrics(self):
_api = HfApi(endpoint=ENDPOINT_STAGING)
_ = _api.list_metrics()
@with_production_testing
def test_list_metrics(self):
_api = HfApi()
metrics = _api.list_metrics()
self.assertGreater(len(metrics), 10)
self.assertIsInstance(metrics[0], MetricInfo)
self.assertTrue(any(metric.description for metric in metrics))
class HfApiPrivateTest(HfApiCommonTestWithLogin):
def setUp(self) -> None:
super().setUp()
self.REPO_NAME = repo_name("private")
self._api.create_repo(name=self.REPO_NAME, token=self._token, private=True)
def tearDown(self) -> None:
self._api.delete_repo(name=self.REPO_NAME, token=self._token)
def test_model_info(self):
shutil.rmtree(os.path.dirname(HfFolder.path_token))
# Test we cannot access model info without a token
with self.assertRaisesRegex(requests.exceptions.HTTPError, "404 Client Error"):
_ = self._api.model_info(repo_id=f"{USER}/{self.REPO_NAME}")
# Test we can access model info with a token
model_info = self._api.model_info(
repo_id=f"{USER}/{self.REPO_NAME}", token=self._token
)
self.assertIsInstance(model_info, ModelInfo)
class HfFolderTest(unittest.TestCase):
def test_token_workflow(self):
"""
Test the whole token save/get/delete workflow,
with the desired behavior with respect to non-existent tokens.
"""
token = "token-{}".format(int(time.time()))
HfFolder.save_token(token)
self.assertEqual(HfFolder.get_token(), token)
HfFolder.delete_token()
HfFolder.delete_token()
# ^^ not an error, we test that the
# second call does not fail.
self.assertEqual(HfFolder.get_token(), None)
@require_git_lfs
class HfLargefilesTest(HfApiCommonTest):
@classmethod
def setUpClass(cls):
"""
Share this valid token in all tests below.
"""
cls._token = cls._api.login(username=USER, password=<PASSWORD>)
def setUp(self):
self.REPO_NAME_LARGE_FILE = repo_name_large_file()
try:
shutil.rmtree(WORKING_REPO_DIR, onerror=set_write_permission_and_retry)
except FileNotFoundError:
pass
def tearDown(self):
self._api.delete_repo(name=self.REPO_NAME_LARGE_FILE, token=self._token)
def setup_local_clone(self, REMOTE_URL):
REMOTE_URL_AUTH = REMOTE_URL.replace(
ENDPOINT_STAGING, ENDPOINT_STAGING_BASIC_AUTH
)
subprocess.run(
["git", "clone", REMOTE_URL_AUTH, WORKING_REPO_DIR],
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
subprocess.run(
["git", "lfs", "track", "*.pdf"], check=True, cwd=WORKING_REPO_DIR
)
subprocess.run(
["git", "lfs", "track", "*.epub"], check=True, cwd=WORKING_REPO_DIR
)
def test_end_to_end_thresh_6M(self):
REMOTE_URL = self._api.create_repo(
name=self.REPO_NAME_LARGE_FILE,
token=self._token,
lfsmultipartthresh=6 * 10 ** 6,
)
self.setup_local_clone(REMOTE_URL)
subprocess.run(
["wget", LARGE_FILE_18MB],
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
| |
crnt_exps = exps_df[fields]
crnt_exps = crnt_exps.merge(gene_fit_d['q'][["name","u","short","maxFit","gMean"]])
new_high = high_df.merge(crnt_exps, left_on="expName", right_on="name")
check_bool = [bool(new_high['gMean'].iloc[ix] >= min_gMean and \
new_high['fit'].iloc[ix] >= new_high['maxFit'].iloc[ix] - max_below) \
for ix, val in new_high['gMean'].items()]
new_high = new_high[check_bool]
new_high = new_high.merge(genes_df[["locusId","sysName","desc"]])
    new_high = new_high.iloc[py_order(list(new_high['expName']), tie_breaker=list(-1*new_high['fit']))]
if dbg_prnt:
new_high.to_csv("tmp/py_new_high_df.tsv", sep="\t", index=False)
return new_high
"""
# Note thresholds are different than in high_fit.pl
HighFit = function(fit, genes, expsUsed, min.fit=4, min.t=5, max.se=2, min.gMean=10, max.below=8) {
# wHigh is a dataframe with two columns, one called 'rows', and one called 'columns'
wHigh = which(fit$lrn >= min.fit & fit$t >= min.t, arr.ind=T);
high = data.frame(locusId=fit$g[wHigh[,1]], expName=names(fit$lrn)[wHigh[,2]], fit=fit$lrn[wHigh], t=fit$t[wHigh]);
# t ~= fit/standard_error, so estimate s.e. = fit/t
high$se = high$fit/high$t;
high$sdNaive = fit$sdNaive[wHigh];
high = subset(high, se <= max.se);
# which experiments are ok
fields = words("name Group Condition_1 Concentration_1 Units_1 Media short");
fields = fields[fields %in% names(expsUsed)];
exps = expsUsed[, fields];
exps = merge(exps, fit$q[,words("name u short maxFit gMean")]);
high = merge(high, exps, by.x="expName", by.y="name");
high = subset(high, gMean >= min.gMean & fit >= maxFit - max.below);
names(high)[names(high)=="u"] = "used";
high = merge(genes[,c("locusId","sysName","desc")], high);
high = high[order(high$expName, -high$fit),];
return(high);
}
"""
def getGenesPerScaffold(genes_df, genesUsed):
"""
Args:
genes_df: Dataframe of genes.GC
genesUsed: list<locusId (str)>
Returns:
genesPerScaffold:
genesPerScaffold is a dict with scaffold -> number of genes found in that scaffold
function py_table comes from file 'translate_R_to_pandas'
"""
#We iterate over every row of genes_df and find locations of genesUsed locusIds
rows_with_locus_Ids_in_genesUsed_bool = [genes_df['locusId'][i] in genesUsed \
for i in range(len(genes_df['locusId']))]
genesPerScaffold = py_table(list(genes_df['scaffoldId'][rows_with_locus_Ids_in_genesUsed_bool]
))
return genesPerScaffold
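# Illustrative sketch (not from the original source; names are hypothetical):
# with a genes_df whose 'scaffoldId' column is ["s1", "s1", "s2"] and a genesUsed
# list covering all three locusIds, py_table would return a mapping like
# {"s1": 2, "s2": 1}, i.e. the number of used genes per scaffold.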
def check_if_every_t0set_is_in_t0tot(exps_df, t0tot):
"""
Args:
exps_df:
Dataframe of FEBABarSeq.tsv
t0tot: data frame where column names are 'date setname'
and linked to a list of sums over the indexes that relate
to that setname, with the list length being equal to the
total number of strains (barcodes) in all.poolcount
all columns are t0's?
"""
# We check if every t0set is in t0tot
#{datesetname:[] for datesetname in expsT0.keys()}
incorrect_sets = []
for t0set in exps_df['t0set'].array:
        if t0set not in t0tot.columns:
incorrect_sets.append(t0set)
if len(incorrect_sets) > 0:
raise Exception("incorrect t0sets: \n" + ", ".join(incorrect_sets))
def get_GenesUsed12(genesUsed12, minT0Gene, strainsUsed, all_df,
t0tot):
"""
    We get the locusIds which have insertions both below 0.5 and above
    0.5 of the gene length (fraction 'f'), and whose Time0 totals on each
    side are at least minT0Gene/2
Args:
genesUsed12: None or list<locusId (str)>
minT0Gene: int
strainsUsed: list<bool>
all_df: Dataframe needs col (f)
t0tot: data frame where column names are 'date setname'
and linked to a list of sums over the indexes that relate
to that setname, with the list length being equal to the
total number of strains (barcodes) in all.poolcount
all columns are t0's?
Returns:
genesUsed12: list of locusIds that have both high f (>0.5) and low f (<0.5)
insertions with enough abundance of insertions on both sides
"""
if genesUsed12 is None:
minT0GeneSide = minT0Gene/2
# d1t0tot captures t0tot whose strains have f < 0.5 and True in strainsUsed
stUsed_and_f_low = [strainsUsed[i] and all_df['f'].iloc[i] < 0.5 for i \
in range(len(strainsUsed))]
d1, d1_row_min_bool = get_non_locusIdSumsForGene12(minT0GeneSide, t0tot, all_df,
stUsed_and_f_low)
# d2t0tot captures t0tot whose strains have f >= 0.5 and True in strainsUsed
stUsed_and_f_high = [strainsUsed[i] and all_df['f'].iloc[i] >= 0.5 for i
in range(len(strainsUsed))]
d2, d2_row_min_bool = get_non_locusIdSumsForGene12(minT0GeneSide, t0tot, all_df,
stUsed_and_f_high)
genesUsed12 = list(
set(d1['locusId'][d1_row_min_bool]).intersection(
set(d2['locusId'][d2_row_min_bool]))
)
# Should the counts for each half of the gene (d1,d2) be saved as a diagnostic?
# t0_gN should be enough for now
if (len(genesUsed12) < 100):
raise Exception(
f"Length of genesUsed12 is less than 100. Value: {len(genesUsed12)}"
)
return genesUsed12
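# Illustrative sketch (not from the original source; values are hypothetical):
# if d1 (first-half insertions, f < 0.5) passes the minT0GeneSide threshold for
# locusIds {"g1", "g2", "g3"} and d2 (second-half insertions, f >= 0.5) passes it
# for {"g2", "g3", "g4"}, then genesUsed12 contains only "g2" and "g3", since only
# those genes have enough Time0 coverage on both halves.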
def get_non_locusIdSumsForGene12(minT0GeneSide, t0tot, all_df, stUsed_and_good_f):
"""
Args:
minT0GeneSide (int): int
t0tot (pandas DataFrame): DataFrame of t0 aggregates
all_df (pandas DataFrame):
stUsed_and_good_f list(bool): A list of length all_df and t0tot (which are equivalent
in the number of rows they have), which indicates
which strains we care about now.
Returns:
        crt: DataFrame of t0tot columns summed per locusId for the selected strains
        crt_row_min_bool: list<bool> marking rows of crt whose smallest column sum
                          is >= minT0GeneSide
"""
crtt0tot = t0tot[stUsed_and_good_f]
crtt0tot['locusId'] = all_df['locusId'][stUsed_and_good_f]
crt = py_aggregate(crtt0tot,
'locusId',
'sum')
crt_mins = crt.loc[:, crt.columns != 'locusId'].min(axis=1)
#print(crt_mins)
crt_row_min_bool = [x >= minT0GeneSide for x in list(crt_mins)]
return crt, crt_row_min_bool
def print_info2(has_gene2, all_df, strainsUsed, genesUsed):
"""
Args:
has_gene2: list<bool>
all_df: DataFrame of all.poolcount
strainsUsed: list<bool>
genesUsed: list<locusId (str)>
"""
# We count the number of Trues in has_gene2
num_true_has_gene2 = has_gene2.count(True)
num_unique_locus_Ids = len(all_df['locusId'][has_gene2].unique())
    logging.info(f"Using {strainsUsed.count(True)} of {num_true_has_gene2} genic strains.")
logging.info(f"Using {len(genesUsed)} of {num_unique_locus_Ids} genes with data.")
return None
def remove_genes_if_not_in_genes_df(genesUsed_list, genes_df):
"""
    We check that every gene in genesUsed_list is present in genes_df;
    genes that aren't in genes_df are removed from the list and reported.
    Args:
        genesUsed_list: list<locusId (str)>
        genes_df: Dataframe of genes.GC file (~12 columns)
    Returns:
        genesUsed_list: list<locusId (str)> restricted to locusIds present in genes_df
"""
    genes_in_genes_df_bool = True
    all_genes_locus_id = set(genes_df['locusId'])
    # Removing items from a list while iterating over it skips elements,
    # so we build the kept and dropped lists with comprehensions instead.
    genes_not_in_genes_df = [x for x in genesUsed_list if x not in all_genes_locus_id]
    genesUsed_list = [x for x in genesUsed_list if x in all_genes_locus_id]
if len(genesUsed_list) < 10 or (not genes_in_genes_df_bool):
logging.info("genesUsed_list")
logging.info(genesUsed_list)
raise Exception(f"Less than 10 genes left, exiting program: {len(genesUsed_list)}")
if len(genes_not_in_genes_df) > 0:
        logging.critical("Gene Locus Ids not in the genes.GC file: \n" +
                         ", ".join(genes_not_in_genes_df) + "\n")
return genesUsed_list
def get_smallScaffold(genesPerScaffold, minGenesPerScaffold, genes_df,
debug_print_bool=False):
"""
Args:
genesPerScaffold: dict scaffold -> number of genes in that scaffold
minGenesPerScaffold: int
genes_df: dataframe of genes.GC
Returns:
smallScaffold: list<scaffold_name (str)> whose number of genes
in the scaffold is less than minGenesPerScaffold (the minimum)
smallLocusIds: list<locusId str> All LocusIds related to scaffolds in smallScaffold
"""
# This is a list of scaffold Names (str) whose gene number is too low
smallScaffold = []
    # genesPerScaffold maps scaffoldId -> number of genes, so iterate over items()
    for scaffold, num_genes in genesPerScaffold.items():
        if num_genes < minGenesPerScaffold:
            smallScaffold.append(scaffold)
if debug_print_bool:
debug_print(smallScaffold, 'smallScaffold')
if len(smallScaffold) > 0:
        logging.info("Ignoring genes on small scaffolds: " +
                     ", ".join(smallScaffold) + "\n")
smallLocus_Ids = []
for index, row in genes_df.iterrows():
current_scaffold = row['scaffoldId']
current_locus_id = row['locusId']
if current_scaffold in smallScaffold:
smallLocus_Ids.append(current_locus_id)
return smallScaffold, smallLocus_Ids
def getGenesUsed(t0tot, strainsUsed, all_df, minT0Gene, genesUsed,
debug_print_bool=False):
""" We create the variable genesUsed
Args:
t0tot: A Dataframe which contains datesetname: [sum1, sum2,
...] for datesetname in expsT0.keys(),
i.e. A dataframe with timezeros datesetnames
The number of rows in the data frame is equal
to the number of rows in all_df.
Does not contain cols besides datesetnames.
Contains sum over all samples that match into a datesetname
that is a 'Time0'
strainsUsed: list<bool> length of which is the same as all_df and t0tot
all_df: needs col locusId
minT0Gene: (int)
genesUsed: either None or a list of locusIds to be used
Returns:
genesUsed: list of unique locusIds such that their mean Time0 values
is greater than minT0Gene
Description:
We take the t0tot (Time0 totals), we take the strainsUsed from that
and add a related column with locusIds from all_df.
Then we sum these up over the locusIds, so the number of rows
in t0_gN_used will be the same as the total number of unique
locusIds in unique_usable_locus_ids
"""
# genesUsed is either None or a list of locusIds to be used
pre_t0_gn_used = t0tot[strainsUsed]
pre_t0_gn_used['locusId'] = list(all_df['locusId'][strainsUsed])
if genesUsed is None:
        # t0_gN_used holds the Time0 totals summed per locusId
t0_gN_used = py_aggregate(pre_t0_gn_used,
'locusId',
func='sum'
)
if debug_print_bool:
t0_gN_used.to_csv("tmp/py_t0_gN_used.tsv", index=False, sep="\t")
        # n0 is a pandas Series with the row means of t0_gN_used, i.e. the mean
        # Time0 count per gene over the Time0 sample columns
n0 = t0_gN_used.iloc[:,t0_gN_used.columns != 'locusId'].mean(axis=1)
        logging.info(f"Time0 reads per gene: mean {statistics.mean(n0)}, "
                     f"median: {statistics.median(n0)}, "
                     f"ratio: {statistics.mean(n0)/statistics.median(n0)}")
# Below is boolean list of locations where the row mean passes minT0Gene
genesUsedpre = [(n0.iloc[i] >= minT0Gene) for i in range(n0.shape[0])]
#print(genesUsedpre[:100])
genesUsed = t0_gN_used['locusId'][genesUsedpre]
if debug_print_bool:
genesUsed.to_csv("tmp/py_genesUsed.tsv", sep="\t")
return genesUsed
def createStrainsUsed(t0tot, minT0Strain, has_gene2, strainsUsed):
""" Create the variable strainsUsed - uses existing var if not None
We make strainsUsed a list which contains True or False values for
each strain in all_df such that both the strain has an insertion
centrally in a gene (meaning .1<f<.9) AND that the average | |
style path string into seq of commands.
"""
result = dll.pixie_parse_path(path.encode("utf8"))
if check_error():
raise PixieError(take_error())
return result
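# Hedged usage sketch (not from the original source): parse_path expects an
# SVG-style path string, e.g.
#   path = parse_path("M 10 10 L 90 90 Z")
# The exact set of supported commands is defined by the native pixie library.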
def miter_limit_to_angle(limit):
"""
Converts miter-limit-ratio to miter-limit-angle.
"""
result = dll.pixie_miter_limit_to_angle(limit)
return result
def angle_to_miter_limit(angle):
"""
Converts miter-limit-angle to miter-limit-ratio.
"""
result = dll.pixie_angle_to_miter_limit(angle)
return result
def parse_color(s):
result = dll.pixie_parse_color(s.encode("utf8"))
if check_error():
raise PixieError(take_error())
return result
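# Hedged usage sketch (not from the original source): assuming the native parser
# accepts CSS-style color strings, a call could look like
#   red = parse_color("#FF0000")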
def translate(x, y):
result = dll.pixie_translate(x, y)
return result
def rotate(angle):
result = dll.pixie_rotate(angle)
return result
def scale(x, y):
result = dll.pixie_scale(x, y)
return result
def inverse(m):
result = dll.pixie_inverse(m)
return result
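# Hedged usage sketch (not from the original source): these helpers return
# Matrix3 values that can be combined with the matrix multiplication exposed
# below, e.g. a translate-then-rotate transform and its inverse:
#   m = dll.pixie_matrix_3_mul(translate(10, 20), rotate(0.5))
#   m_inv = inverse(m)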
dll.pixie_check_error.argtypes = []
dll.pixie_check_error.restype = c_bool
dll.pixie_take_error.argtypes = []
dll.pixie_take_error.restype = c_char_p
dll.pixie_matrix_3.argtypes = []
dll.pixie_matrix_3.restype = Matrix3
dll.pixie_matrix_3_mul.argtypes = [Matrix3, Matrix3]
dll.pixie_matrix_3_mul.restype = Matrix3
dll.pixie_seq_float_32_unref.argtypes = [SeqFloat32]
dll.pixie_seq_float_32_unref.restype = None
dll.pixie_new_seq_float_32.argtypes = []
dll.pixie_new_seq_float_32.restype = c_ulonglong
dll.pixie_seq_float_32_len.argtypes = [SeqFloat32]
dll.pixie_seq_float_32_len.restype = c_longlong
dll.pixie_seq_float_32_get.argtypes = [SeqFloat32, c_longlong]
dll.pixie_seq_float_32_get.restype = c_float
dll.pixie_seq_float_32_set.argtypes = [SeqFloat32, c_longlong, c_float]
dll.pixie_seq_float_32_set.restype = None
dll.pixie_seq_float_32_delete.argtypes = [SeqFloat32, c_longlong]
dll.pixie_seq_float_32_delete.restype = None
dll.pixie_seq_float_32_add.argtypes = [SeqFloat32, c_float]
dll.pixie_seq_float_32_add.restype = None
dll.pixie_seq_float_32_clear.argtypes = [SeqFloat32]
dll.pixie_seq_float_32_clear.restype = None
dll.pixie_seq_span_unref.argtypes = [SeqSpan]
dll.pixie_seq_span_unref.restype = None
dll.pixie_new_seq_span.argtypes = []
dll.pixie_new_seq_span.restype = c_ulonglong
dll.pixie_seq_span_len.argtypes = [SeqSpan]
dll.pixie_seq_span_len.restype = c_longlong
dll.pixie_seq_span_get.argtypes = [SeqSpan, c_longlong]
dll.pixie_seq_span_get.restype = Span
dll.pixie_seq_span_set.argtypes = [SeqSpan, c_longlong, Span]
dll.pixie_seq_span_set.restype = None
dll.pixie_seq_span_delete.argtypes = [SeqSpan, c_longlong]
dll.pixie_seq_span_delete.restype = None
dll.pixie_seq_span_add.argtypes = [SeqSpan, Span]
dll.pixie_seq_span_add.restype = None
dll.pixie_seq_span_clear.argtypes = [SeqSpan]
dll.pixie_seq_span_clear.restype = None
dll.pixie_seq_span_typeset.argtypes = [SeqSpan, Vector2, HorizontalAlignment, VerticalAlignment, c_bool]
dll.pixie_seq_span_typeset.restype = Arrangement
dll.pixie_seq_span_compute_bounds.argtypes = [SeqSpan]
dll.pixie_seq_span_compute_bounds.restype = Vector2
dll.pixie_image_unref.argtypes = [Image]
dll.pixie_image_unref.restype = None
dll.pixie_new_image.argtypes = [c_longlong, c_longlong]
dll.pixie_new_image.restype = c_ulonglong
dll.pixie_image_get_width.argtypes = [Image]
dll.pixie_image_get_width.restype = c_longlong
dll.pixie_image_set_width.argtypes = [Image, c_longlong]
dll.pixie_image_set_width.restype = None
dll.pixie_image_get_height.argtypes = [Image]
dll.pixie_image_get_height.restype = c_longlong
dll.pixie_image_set_height.argtypes = [Image, c_longlong]
dll.pixie_image_set_height.restype = None
dll.pixie_image_write_file.argtypes = [Image, c_char_p]
dll.pixie_image_write_file.restype = None
dll.pixie_image_copy.argtypes = [Image]
dll.pixie_image_copy.restype = Image
dll.pixie_image_get_color.argtypes = [Image, c_longlong, c_longlong]
dll.pixie_image_get_color.restype = Color
dll.pixie_image_set_color.argtypes = [Image, c_longlong, c_longlong, Color]
dll.pixie_image_set_color.restype = None
dll.pixie_image_fill.argtypes = [Image, Color]
dll.pixie_image_fill.restype = None
dll.pixie_image_flip_horizontal.argtypes = [Image]
dll.pixie_image_flip_horizontal.restype = None
dll.pixie_image_flip_vertical.argtypes = [Image]
dll.pixie_image_flip_vertical.restype = None
dll.pixie_image_sub_image.argtypes = [Image, c_longlong, c_longlong, c_longlong, c_longlong]
dll.pixie_image_sub_image.restype = Image
dll.pixie_image_minify_by_2.argtypes = [Image, c_longlong]
dll.pixie_image_minify_by_2.restype = Image
dll.pixie_image_magnify_by_2.argtypes = [Image, c_longlong]
dll.pixie_image_magnify_by_2.restype = Image
dll.pixie_image_apply_opacity.argtypes = [Image, c_float]
dll.pixie_image_apply_opacity.restype = None
dll.pixie_image_invert.argtypes = [Image]
dll.pixie_image_invert.restype = None
dll.pixie_image_blur.argtypes = [Image, c_float, Color]
dll.pixie_image_blur.restype = None
dll.pixie_image_new_mask.argtypes = [Image]
dll.pixie_image_new_mask.restype = Mask
dll.pixie_image_resize.argtypes = [Image, c_longlong, c_longlong]
dll.pixie_image_resize.restype = Image
dll.pixie_image_shadow.argtypes = [Image, Vector2, c_float, c_float, Color]
dll.pixie_image_shadow.restype = Image
dll.pixie_image_super_image.argtypes = [Image, c_longlong, c_longlong, c_longlong, c_longlong]
dll.pixie_image_super_image.restype = Image
dll.pixie_image_draw.argtypes = [Image, Image, Matrix3, BlendMode]
dll.pixie_image_draw.restype = None
dll.pixie_image_mask_draw.argtypes = [Image, Mask, Matrix3, BlendMode]
dll.pixie_image_mask_draw.restype = None
dll.pixie_image_fill_gradient.argtypes = [Image, Paint]
dll.pixie_image_fill_gradient.restype = None
dll.pixie_image_fill_text.argtypes = [Image, Font, c_char_p, Matrix3, Vector2, HorizontalAlignment, VerticalAlignment]
dll.pixie_image_fill_text.restype = None
dll.pixie_image_arrangement_fill_text.argtypes = [Image, Arrangement, Matrix3]
dll.pixie_image_arrangement_fill_text.restype = None
dll.pixie_image_stroke_text.argtypes = [Image, Font, c_char_p, Matrix3, c_float, Vector2, HorizontalAlignment, VerticalAlignment, LineCap, LineJoin, c_float, SeqFloat32]
dll.pixie_image_stroke_text.restype = None
dll.pixie_image_arrangement_stroke_text.argtypes = [Image, Arrangement, Matrix3, c_float, LineCap, LineJoin, c_float, SeqFloat32]
dll.pixie_image_arrangement_stroke_text.restype = None
dll.pixie_image_fill_path.argtypes = [Image, Path, Paint, Matrix3, WindingRule]
dll.pixie_image_fill_path.restype = None
dll.pixie_image_stroke_path.argtypes = [Image, Path, Paint, Matrix3, c_float, LineCap, LineJoin, c_float, SeqFloat32]
dll.pixie_image_stroke_path.restype = None
dll.pixie_image_new_context.argtypes = [Image]
dll.pixie_image_new_context.restype = Context
dll.pixie_mask_unref.argtypes = [Mask]
dll.pixie_mask_unref.restype = None
dll.pixie_new_mask.argtypes = [c_longlong, c_longlong]
dll.pixie_new_mask.restype = c_ulonglong
dll.pixie_mask_get_width.argtypes = [Mask]
dll.pixie_mask_get_width.restype = c_longlong
dll.pixie_mask_set_width.argtypes = [Mask, c_longlong]
dll.pixie_mask_set_width.restype = None
dll.pixie_mask_get_height.argtypes = [Mask]
dll.pixie_mask_get_height.restype = c_longlong
dll.pixie_mask_set_height.argtypes = [Mask, c_longlong]
dll.pixie_mask_set_height.restype = None
dll.pixie_mask_write_file.argtypes = [Mask, c_char_p]
dll.pixie_mask_write_file.restype = None
dll.pixie_mask_copy.argtypes = [Mask]
dll.pixie_mask_copy.restype = Mask
dll.pixie_mask_get_value.argtypes = [Mask, c_longlong, c_longlong]
dll.pixie_mask_get_value.restype = c_ubyte
dll.pixie_mask_set_value.argtypes = [Mask, c_longlong, c_longlong, c_ubyte]
dll.pixie_mask_set_value.restype = None
dll.pixie_mask_fill.argtypes = [Mask, c_ubyte]
dll.pixie_mask_fill.restype = None
dll.pixie_mask_minify_by_2.argtypes = [Mask, c_longlong]
dll.pixie_mask_minify_by_2.restype = Mask
dll.pixie_mask_magnify_by_2.argtypes = [Mask, c_longlong]
dll.pixie_mask_magnify_by_2.restype = Mask
dll.pixie_mask_spread.argtypes = [Mask, c_float]
dll.pixie_mask_spread.restype = None
dll.pixie_mask_ceil.argtypes = [Mask]
dll.pixie_mask_ceil.restype = None
dll.pixie_mask_new_image.argtypes = [Mask]
dll.pixie_mask_new_image.restype = Image
dll.pixie_mask_apply_opacity.argtypes = [Mask, c_float]
dll.pixie_mask_apply_opacity.restype = None
dll.pixie_mask_invert.argtypes = [Mask]
dll.pixie_mask_invert.restype = None
dll.pixie_mask_blur.argtypes = [Mask, c_float, c_ubyte]
dll.pixie_mask_blur.restype = None
dll.pixie_mask_draw.argtypes = [Mask, Mask, Matrix3, BlendMode]
dll.pixie_mask_draw.restype = None
dll.pixie_mask_image_draw.argtypes = [Mask, Image, Matrix3, BlendMode]
dll.pixie_mask_image_draw.restype = None
dll.pixie_mask_fill_text.argtypes = [Mask, Font, c_char_p, Matrix3, Vector2, HorizontalAlignment, VerticalAlignment]
dll.pixie_mask_fill_text.restype = None
dll.pixie_mask_arrangement_fill_text.argtypes = [Mask, Arrangement, Matrix3]
dll.pixie_mask_arrangement_fill_text.restype = None
dll.pixie_mask_stroke_text.argtypes = [Mask, Font, c_char_p, Matrix3, c_float, Vector2, HorizontalAlignment, VerticalAlignment, LineCap, LineJoin, c_float, SeqFloat32]
dll.pixie_mask_stroke_text.restype = None
dll.pixie_mask_arrangement_stroke_text.argtypes = [Mask, Arrangement, Matrix3, c_float, LineCap, LineJoin, c_float, SeqFloat32]
dll.pixie_mask_arrangement_stroke_text.restype = None
dll.pixie_mask_fill_path.argtypes = [Mask, Path, Matrix3, WindingRule, BlendMode]
dll.pixie_mask_fill_path.restype = None
dll.pixie_mask_stroke_path.argtypes = [Mask, Path, Matrix3, c_float, LineCap, LineJoin, c_float, SeqFloat32, BlendMode]
dll.pixie_mask_stroke_path.restype = None
dll.pixie_paint_unref.argtypes = [Paint]
dll.pixie_paint_unref.restype = None
dll.pixie_new_paint.argtypes = [PaintKind]
dll.pixie_new_paint.restype = c_ulonglong
dll.pixie_paint_get_kind.argtypes = [Paint]
dll.pixie_paint_get_kind.restype = PaintKind
dll.pixie_paint_set_kind.argtypes = [Paint, PaintKind]
dll.pixie_paint_set_kind.restype = None
dll.pixie_paint_get_blend_mode.argtypes = [Paint]
dll.pixie_paint_get_blend_mode.restype = BlendMode
dll.pixie_paint_set_blend_mode.argtypes = [Paint, BlendMode]
dll.pixie_paint_set_blend_mode.restype = None
dll.pixie_paint_get_opacity.argtypes = [Paint]
dll.pixie_paint_get_opacity.restype = c_float
dll.pixie_paint_set_opacity.argtypes = [Paint, c_float]
dll.pixie_paint_set_opacity.restype = None
dll.pixie_paint_get_color.argtypes = [Paint]
dll.pixie_paint_get_color.restype = Color
dll.pixie_paint_set_color.argtypes = [Paint, Color]
dll.pixie_paint_set_color.restype = None
dll.pixie_paint_get_image.argtypes = [Paint]
dll.pixie_paint_get_image.restype = Image
dll.pixie_paint_set_image.argtypes = [Paint, Image]
dll.pixie_paint_set_image.restype = None
dll.pixie_paint_get_image_mat.argtypes = [Paint]
dll.pixie_paint_get_image_mat.restype = Matrix3
dll.pixie_paint_set_image_mat.argtypes = [Paint, Matrix3]
dll.pixie_paint_set_image_mat.restype = None
dll.pixie_paint_gradient_handle_positions_len.argtypes = [Paint]
dll.pixie_paint_gradient_handle_positions_len.restype = c_longlong
dll.pixie_paint_gradient_handle_positions_get.argtypes = [Paint, c_longlong]
dll.pixie_paint_gradient_handle_positions_get.restype = Vector2
dll.pixie_paint_gradient_handle_positions_set.argtypes = [Paint, c_longlong, Vector2]
dll.pixie_paint_gradient_handle_positions_set.restype = None
dll.pixie_paint_gradient_handle_positions_delete.argtypes = [Paint, c_longlong]
dll.pixie_paint_gradient_handle_positions_delete.restype = None
dll.pixie_paint_gradient_handle_positions_add.argtypes = [Paint, Vector2]
dll.pixie_paint_gradient_handle_positions_add.restype = None
dll.pixie_paint_gradient_handle_positions_clear.argtypes = [Paint]
dll.pixie_paint_gradient_handle_positions_clear.restype = None
dll.pixie_paint_gradient_stops_len.argtypes = [Paint]
dll.pixie_paint_gradient_stops_len.restype = c_longlong
dll.pixie_paint_gradient_stops_get.argtypes = [Paint, c_longlong]
dll.pixie_paint_gradient_stops_get.restype = ColorStop
dll.pixie_paint_gradient_stops_set.argtypes = [Paint, c_longlong, ColorStop]
dll.pixie_paint_gradient_stops_set.restype = None
dll.pixie_paint_gradient_stops_delete.argtypes = [Paint, c_longlong]
dll.pixie_paint_gradient_stops_delete.restype = None
dll.pixie_paint_gradient_stops_add.argtypes = [Paint, ColorStop]
dll.pixie_paint_gradient_stops_add.restype = None
dll.pixie_paint_gradient_stops_clear.argtypes = [Paint]
dll.pixie_paint_gradient_stops_clear.restype = None
dll.pixie_paint_new_paint.argtypes = [Paint]
dll.pixie_paint_new_paint.restype = Paint
dll.pixie_path_unref.argtypes = [Path]
dll.pixie_path_unref.restype = None
dll.pixie_new_path.argtypes = []
dll.pixie_new_path.restype = c_ulonglong
dll.pixie_path_transform.argtypes = [Path, Matrix3]
dll.pixie_path_transform.restype = None
dll.pixie_path_add_path.argtypes = [Path, Path]
dll.pixie_path_add_path.restype = None
dll.pixie_path_close_path.argtypes = [Path]
dll.pixie_path_close_path.restype = None
dll.pixie_path_compute_bounds.argtypes = [Path, Matrix3]
dll.pixie_path_compute_bounds.restype = Rect
dll.pixie_path_fill_overlaps.argtypes = [Path, Vector2, Matrix3, WindingRule]
dll.pixie_path_fill_overlaps.restype = c_bool
dll.pixie_path_stroke_overlaps.argtypes = [Path, Vector2, Matrix3, c_float, LineCap, LineJoin, c_float, SeqFloat32]
dll.pixie_path_stroke_overlaps.restype = c_bool
dll.pixie_path_move_to.argtypes = [Path, c_float, c_float]
dll.pixie_path_move_to.restype = None
dll.pixie_path_line_to.argtypes = [Path, c_float, c_float]
dll.pixie_path_line_to.restype = None
dll.pixie_path_bezier_curve_to.argtypes = [Path, c_float, c_float, c_float, c_float, c_float, c_float]
dll.pixie_path_bezier_curve_to.restype = None
dll.pixie_path_quadratic_curve_to.argtypes = [Path, c_float, c_float, c_float, c_float]
dll.pixie_path_quadratic_curve_to.restype = None
dll.pixie_path_elliptical_arc_to.argtypes = [Path, c_float, c_float, c_float, c_bool, c_bool, c_float, c_float]
dll.pixie_path_elliptical_arc_to.restype = None
dll.pixie_path_arc.argtypes = [Path, c_float, c_float, c_float, c_float, c_float, c_bool]
dll.pixie_path_arc.restype = None
dll.pixie_path_arc_to.argtypes = [Path, c_float, c_float, c_float, c_float, c_float]
dll.pixie_path_arc_to.restype = None
dll.pixie_path_rect.argtypes = [Path, c_float, c_float, c_float, c_float, c_bool]
dll.pixie_path_rect.restype = None
dll.pixie_path_rounded_rect.argtypes = [Path, c_float, c_float, c_float, c_float, c_float, c_float, c_float, c_float, c_bool]
dll.pixie_path_rounded_rect.restype = None
dll.pixie_path_ellipse.argtypes = [Path, c_float, c_float, c_float, c_float]
dll.pixie_path_ellipse.restype = None
dll.pixie_path_circle.argtypes = [Path, c_float, c_float, c_float]
dll.pixie_path_circle.restype = None
dll.pixie_path_polygon.argtypes = [Path, c_float, c_float, c_float, c_longlong]
dll.pixie_path_polygon.restype = None
dll.pixie_typeface_unref.argtypes = [Typeface]
dll.pixie_typeface_unref.restype = None
dll.pixie_typeface_get_file_path.argtypes = [Typeface]
dll.pixie_typeface_get_file_path.restype = c_char_p
dll.pixie_typeface_set_file_path.argtypes = [Typeface, c_char_p]
dll.pixie_typeface_set_file_path.restype = None
dll.pixie_typeface_ascent.argtypes = [Typeface]
dll.pixie_typeface_ascent.restype = c_float
dll.pixie_typeface_descent.argtypes = [Typeface]
dll.pixie_typeface_descent.restype = c_float
dll.pixie_typeface_line_gap.argtypes = [Typeface]
dll.pixie_typeface_line_gap.restype = c_float
dll.pixie_typeface_line_height.argtypes = [Typeface]
dll.pixie_typeface_line_height.restype = c_float
dll.pixie_typeface_has_glyph.argtypes = [Typeface, c_int]
dll.pixie_typeface_has_glyph.restype = c_bool
dll.pixie_typeface_get_glyph_path.argtypes = [Typeface, c_int]
dll.pixie_typeface_get_glyph_path.restype = Path
dll.pixie_typeface_get_advance.argtypes = [Typeface, c_int]
dll.pixie_typeface_get_advance.restype = c_float
dll.pixie_typeface_get_kerning_adjustment.argtypes = [Typeface, c_int, c_int]
dll.pixie_typeface_get_kerning_adjustment.restype = c_float
dll.pixie_typeface_new_font.argtypes = [Typeface]
dll.pixie_typeface_new_font.restype = Font
dll.pixie_font_unref.argtypes = [Font]
dll.pixie_font_unref.restype = None
dll.pixie_font_get_typeface.argtypes = [Font]
dll.pixie_font_get_typeface.restype = Typeface
dll.pixie_font_set_typeface.argtypes = [Font, Typeface]
dll.pixie_font_set_typeface.restype = None
dll.pixie_font_get_size.argtypes = [Font]
dll.pixie_font_get_size.restype = c_float
dll.pixie_font_set_size.argtypes = [Font, c_float]
dll.pixie_font_set_size.restype = None
dll.pixie_font_get_line_height.argtypes = [Font]
dll.pixie_font_get_line_height.restype = c_float
dll.pixie_font_set_line_height.argtypes = [Font, c_float]
dll.pixie_font_set_line_height.restype = None
dll.pixie_font_paints_len.argtypes = [Font]
dll.pixie_font_paints_len.restype = c_longlong
dll.pixie_font_paints_get.argtypes = [Font, c_longlong]
dll.pixie_font_paints_get.restype = Paint
dll.pixie_font_paints_set.argtypes = [Font, c_longlong, Paint]
dll.pixie_font_paints_set.restype = None
dll.pixie_font_paints_delete.argtypes = [Font, c_longlong]
dll.pixie_font_paints_delete.restype = None
dll.pixie_font_paints_add.argtypes = [Font, Paint]
dll.pixie_font_paints_add.restype = None
dll.pixie_font_paints_clear.argtypes = [Font]
dll.pixie_font_paints_clear.restype = None
dll.pixie_font_get_paint.argtypes = [Font]
dll.pixie_font_get_paint.restype = Paint
dll.pixie_font_set_paint.argtypes = [Font, Paint]
dll.pixie_font_set_paint.restype = None
dll.pixie_font_get_text_case.argtypes = [Font]
dll.pixie_font_get_text_case.restype = TextCase
dll.pixie_font_set_text_case.argtypes = [Font, TextCase]
dll.pixie_font_set_text_case.restype = None
dll.pixie_font_get_underline.argtypes = [Font]
dll.pixie_font_get_underline.restype = c_bool
dll.pixie_font_set_underline.argtypes = [Font, c_bool]
dll.pixie_font_set_underline.restype = None
dll.pixie_font_get_strikethrough.argtypes = [Font]
dll.pixie_font_get_strikethrough.restype = c_bool
dll.pixie_font_set_strikethrough.argtypes = [Font, c_bool]
dll.pixie_font_set_strikethrough.restype = None
dll.pixie_font_get_no_kerning_adjustments.argtypes = [Font]
dll.pixie_font_get_no_kerning_adjustments.restype = c_bool
dll.pixie_font_set_no_kerning_adjustments.argtypes = [Font, c_bool]
dll.pixie_font_set_no_kerning_adjustments.restype = None
dll.pixie_font_scale.argtypes = [Font]
dll.pixie_font_scale.restype = c_float
dll.pixie_font_default_line_height.argtypes = [Font]
dll.pixie_font_default_line_height.restype = c_float
dll.pixie_font_typeset.argtypes = [Font, c_char_p, Vector2, HorizontalAlignment, VerticalAlignment, c_bool]
dll.pixie_font_typeset.restype = Arrangement
dll.pixie_font_compute_bounds.argtypes = [Font, c_char_p]
dll.pixie_font_compute_bounds.restype = Vector2
dll.pixie_span_unref.argtypes = [Span]
dll.pixie_span_unref.restype = None
dll.pixie_new_span.argtypes = [c_char_p, Font]
dll.pixie_new_span.restype = c_ulonglong
dll.pixie_span_get_text.argtypes = [Span]
dll.pixie_span_get_text.restype = c_char_p
dll.pixie_span_set_text.argtypes = [Span, c_char_p]
dll.pixie_span_set_text.restype = None
dll.pixie_span_get_font.argtypes = [Span]
dll.pixie_span_get_font.restype = Font
dll.pixie_span_set_font.argtypes = [Span, Font]
dll.pixie_span_set_font.restype = None
dll.pixie_arrangement_unref.argtypes = [Arrangement]
dll.pixie_arrangement_unref.restype = None
dll.pixie_arrangement_compute_bounds.argtypes = [Arrangement]
dll.pixie_arrangement_compute_bounds.restype = Vector2
dll.pixie_context_unref.argtypes = [Context]
dll.pixie_context_unref.restype = None
dll.pixie_new_context.argtypes = [c_longlong, c_longlong]
dll.pixie_new_context.restype = c_ulonglong
dll.pixie_context_get_image.argtypes = [Context]
dll.pixie_context_get_image.restype = Image
dll.pixie_context_set_image.argtypes = [Context, Image]
dll.pixie_context_set_image.restype = None
dll.pixie_context_get_fill_style.argtypes = [Context]
dll.pixie_context_get_fill_style.restype = Paint
dll.pixie_context_set_fill_style.argtypes = [Context, Paint]
dll.pixie_context_set_fill_style.restype = None
dll.pixie_context_get_stroke_style.argtypes = [Context]
dll.pixie_context_get_stroke_style.restype = Paint
dll.pixie_context_set_stroke_style.argtypes = [Context, Paint]
dll.pixie_context_set_stroke_style.restype = None
dll.pixie_context_get_global_alpha.argtypes = [Context]
dll.pixie_context_get_global_alpha.restype = c_float
dll.pixie_context_set_global_alpha.argtypes = [Context, c_float]
dll.pixie_context_set_global_alpha.restype = None
dll.pixie_context_get_line_width.argtypes = [Context]
dll.pixie_context_get_line_width.restype = c_float
dll.pixie_context_set_line_width.argtypes = [Context, c_float]
dll.pixie_context_set_line_width.restype = None
dll.pixie_context_get_miter_limit.argtypes = [Context]
dll.pixie_context_get_miter_limit.restype = c_float
dll.pixie_context_set_miter_limit.argtypes = [Context, c_float]
dll.pixie_context_set_miter_limit.restype = None
dll.pixie_context_get_line_cap.argtypes = [Context]
dll.pixie_context_get_line_cap.restype = LineCap
dll.pixie_context_set_line_cap.argtypes = [Context, LineCap]
dll.pixie_context_set_line_cap.restype = None
dll.pixie_context_get_line_join.argtypes = [Context]
dll.pixie_context_get_line_join.restype = LineJoin
dll.pixie_context_set_line_join.argtypes = [Context, LineJoin]
dll.pixie_context_set_line_join.restype = None
dll.pixie_context_get_font.argtypes = [Context]
dll.pixie_context_get_font.restype = c_char_p
dll.pixie_context_set_font.argtypes = [Context, c_char_p]
dll.pixie_context_set_font.restype = None
dll.pixie_context_get_font_size.argtypes = [Context]
dll.pixie_context_get_font_size.restype = c_float
dll.pixie_context_set_font_size.argtypes = [Context, c_float]
dll.pixie_context_set_font_size.restype = None
dll.pixie_context_get_text_align.argtypes = [Context]
dll.pixie_context_get_text_align.restype = HorizontalAlignment
dll.pixie_context_set_text_align.argtypes = [Context, HorizontalAlignment]
dll.pixie_context_set_text_align.restype = None
dll.pixie_context_save.argtypes = [Context]
dll.pixie_context_save.restype = None
dll.pixie_context_save_layer.argtypes = [Context]
dll.pixie_context_save_layer.restype = None
dll.pixie_context_restore.argtypes = [Context]
dll.pixie_context_restore.restype = None
dll.pixie_context_begin_path.argtypes = [Context]
dll.pixie_context_begin_path.restype = None
dll.pixie_context_close_path.argtypes = [Context]
dll.pixie_context_close_path.restype = None
dll.pixie_context_fill.argtypes = [Context, WindingRule]
dll.pixie_context_fill.restype = None
dll.pixie_context_path_fill.argtypes = [Context, Path, WindingRule]
dll.pixie_context_path_fill.restype = None
dll.pixie_context_clip.argtypes = [Context, WindingRule]
dll.pixie_context_clip.restype = None
dll.pixie_context_path_clip.argtypes = [Context, Path, WindingRule]
dll.pixie_context_path_clip.restype = None
dll.pixie_context_stroke.argtypes = [Context]
dll.pixie_context_stroke.restype = None
dll.pixie_context_path_stroke.argtypes = [Context, Path]
dll.pixie_context_path_stroke.restype = None
dll.pixie_context_measure_text.argtypes = [Context, c_char_p]
dll.pixie_context_measure_text.restype = TextMetrics
dll.pixie_context_get_transform.argtypes = [Context]
dll.pixie_context_get_transform.restype = Matrix3
dll.pixie_context_set_transform.argtypes = [Context, Matrix3]
dll.pixie_context_set_transform.restype = None
dll.pixie_context_transform.argtypes = [Context, Matrix3]
dll.pixie_context_transform.restype = None
dll.pixie_context_reset_transform.argtypes = [Context]
dll.pixie_context_reset_transform.restype = None
dll.pixie_context_draw_image.argtypes = [Context, Image, c_float, c_float]
dll.pixie_context_draw_image.restype = None
dll.pixie_context_draw_image_2.argtypes = [Context, Image, c_float, c_float, c_float, c_float]
dll.pixie_context_draw_image_2.restype = None
dll.pixie_context_draw_image_3.argtypes = [Context, Image, c_float, c_float, c_float, c_float, c_float, c_float, c_float, c_float]
dll.pixie_context_draw_image_3.restype = None
dll.pixie_context_move_to.argtypes = [Context, c_float, c_float]
dll.pixie_context_move_to.restype = None
dll.pixie_context_line_to.argtypes = [Context, c_float, c_float]
dll.pixie_context_line_to.restype = None
dll.pixie_context_bezier_curve_to.argtypes = [Context, c_float, c_float, c_float, c_float, c_float, c_float]
dll.pixie_context_bezier_curve_to.restype = None
dll.pixie_context_quadratic_curve_to.argtypes = | |
import os
import sys
import time
from PIL import Image
import SBOL_File as sbol
import Logical_Representation as logic
import SBOL_visual as visual
from PyQt5 import QtCore, QtGui, uic, QtWidgets
from PyQt5.QtCore import pyqtSlot, QCoreApplication, QBasicTimer, QThread, pyqtSignal
from PyQt5.QtWidgets import QApplication, QDialog, QPushButton, QLabel, QLineEdit, QMessageBox, QFileDialog, QTabWidget, QWidget, QListWidget, QProgressBar
from PyQt5.uic import loadUi
from PyQt5.QtGui import QIcon, QFont, QPixmap
from itertools import product
from functions import *
from time import sleep
import random
from main import process
sys.path.append("circuit_canvas/")
from main_window import CircuitBuilder
font = QFont("Times", 11)
# The main class which operates the entire window
class MainPage(QtWidgets.QMainWindow):
def __init__(self):
if not os.path.exists(os.getcwd()+'/user_files'):
os.makedirs(os.getcwd()+'/user_files')
# Lists which are being used in the code later
self.result=[]
self.tablist=[]
self.checkList=[]
self.checkxmlList=[]
super(MainPage, self).__init__()
        #Loading the UI file which has been created for the main window
loadUi('Genetech.ui', self)
#Setting the logos for the window
self.setWindowIcon(QtGui.QIcon(os.getcwd()+'/icons/SmallLogo.png'))
self.setWindowTitle("GeneTech - v2.0")
pixmap = QPixmap(os.getcwd()+'/icons/BigLogo.png')
self.MainLogo.setPixmap(pixmap)
#Initial Label in the status bar
self.statusBar().showMessage('Ready')
        # Button handlers; these slots are called when the corresponding buttons are clicked
self.SaveButton.clicked.connect(self.SaveLabel)
self.DrawButton.clicked.connect(self.DrawWindow)
self.ViewButton.clicked.connect(self.viewCircuit)
self.ImportNotesButton.clicked.connect(self.FileOpenDialog)
self.SaveNotesButton.clicked.connect(self.SaveNotes)
self.EnterButton.clicked.connect(self.EnterExp)
self.ExitButton.clicked.connect(self.ResetAll)
self.CircuitList.doubleClicked.connect(self.saveImageDialog)
self.xmlList.clicked.connect(self.ReadXMLFile)
self.bexppp = self.InsertExpressionEdit.text()
self.LabelforList = QLabel(self.tab)
self.doubleSpinBox.setSuffix(" s")
self.actionExit.triggered.connect(self.CloseApp)
self.actionAbout.triggered.connect(self.About)
        #Keyboard shortcuts for some functionalities
self.EnterButton.setShortcut("Return")
#self.actionSave.setShortcut("Ctrl+S")
self.actionExit.setShortcut("Ctrl+Q")
self.actionAbout.setShortcut("Ctrl+O")
self.ExitButton.setShortcut("Ctrl+R")
        # Messages on the status bar when the mouse hovers over different window parts
self.actionAbout.setStatusTip("Know more about GeneTech by clicking this button")
self.actionExit.setStatusTip("Reset")
self.EnterButton.setStatusTip("Press the button for result")
self.ExitButton.setStatusTip("Exit the window")
self.InsertExpressionEdit.setStatusTip("Insert a Boolean expression here")
#This function is to open the drawing canvas
def DrawWindow(self):
self.circuit_builder = CircuitBuilder(self)
self.circuit_builder.show()
self.hide()
    #Takes the Boolean expression of the circuit drawn on the circuit canvas, processes it as before,
    #and after running all the relevant steps lists the output circuits and SBOL files
def processDrawEquation(self, bexp):
if self.DelayRadioButton.isChecked():
option = 0
elif self.GatesRadioButton.isChecked():
option = 1
a=0
self.InsertExpressionEdit.setText(bexp)
self.ProgressBar.setVisible(True)
self.ProgressBar.setValue(0)
self.result.append("a")
process(bexp)
DisplayData()
DisplayCircuits()
self.ProgressBar.setValue(25)
sleep(1)
number = random.randint(30,70)
self.ProgressBar.setValue(number)
sbol.SBOL_File(self.spinBox.value(), self.doubleSpinBox.value(), option, self.CircuitSpinBox.value()) #create SBOl files
number = random.randint(75,90)
self.ProgressBar.setValue(number)
sleep(0.1)
logic.Logical_Representation(self.spinBox.value(), self.doubleSpinBox.value(), option, self.CircuitSpinBox.value()) #Create Logical Representation images
visual.SBOLv(self.spinBox.value(), self.doubleSpinBox.value(), option, self.CircuitSpinBox.value()) #create SBOL visual Representation images
self.ProgressBar.setValue(100)
bexp = Convert(bexp)
bexp = "".join(bexp.split())
#bexp = bexp.strip() #Remove spaces in the expression
finalexp=[]
exp = bexp.split("+") #change the notations
for i in range(len(exp)):
term = exp[i].split(".")
finalterm=[]
for j in range(len(term)):
if term[j][-1]=="'":
finalterm.append("not(" + term[j][:-1] + ")")
else:
finalterm.append(term[j])
finalexp.append("("+" and ".join(finalterm)+")")
bexp = " or ".join(finalexp)
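        # Hedged example of the conversion above (assuming Convert leaves a simple
        # sum-of-products expression unchanged): "a.b'+c" becomes "(a and not(b)) or (c)",
        # which Python's eval can evaluate against 0/1 input assignments.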
code = compile(bexp, '', 'eval') #evaluation of expression
TruthTable_Input = code.co_names # Generates the number of inputs in an expression. In a.b there are 2 inputs 'a' and 'b'
        for values1 in product(range(2), repeat=len(TruthTable_Input)):  # generate the input value combinations
header_count=2**(len(values1))
List_TruthTable_Input = [[] for i in range(1, header_count+1)]
self.TruthList.clear()
for BexpIndex in range(len(TruthTable_Input)): #make the list for TruthTable_Input to show on main window
self.ttList.append(TruthTable_Input[BexpIndex])
self.ttList.append(" ")
self.ttList.append(": ")
self.ttList.append(bexp)
s = [str(i) for i in self.ttList]
res = " ".join(s)
self.TruthList.addItem(res)
self.ttList.clear()
        for values in product(range(2), repeat=len(TruthTable_Input)):  # put inputs of expression together
for w in range(len(values)):
List_TruthTable_Input[a].append(str(values[w]))
a+=1
            env = dict(zip(TruthTable_Input, values))  # put the TruthTable_Input and values together
pk = int(eval(code, env)) #generate the output of truthtable
for v in values: #append the list to show on main window
self.ttList.append(v)
self.ttList.append(" ")
self.ttList.append(": ")
self.ttList.append(pk)
s = [str(i) for i in self.ttList]
res = " ".join(s)
self.TruthList.addItem(res)
self.ttList.clear()
if len(self.result) > 0: #Call these functions only if there is an expression
self.CreateCircuitList()
self.CreateXMLList()
self.result.clear()
    # This function reads the circuits txt file and returns a list
    #of the circuits generated from the inserted boolean expression
def ReadCircuitsFile(self):
f = open("circuits.txt")
circuits = []
for i in f:
if "*" in i:
cnt = []
circuits.append(cnt)
else:
cnt.append(i.replace('\n',''))
for i in circuits:
for j in i:
if j == '':
i.remove(j)
return circuits
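    # Hedged sketch of the assumed circuits.txt layout (not from the original
    # source; the gate lines are hypothetical): a line containing "*" starts a new
    # circuit block, and the lines that follow (until the next "*") describe that
    # circuit, e.g.
    #   *
    #   NOT gate with input a
    #   AND gate with inputs ...
    #   *
    #   ...
    # ReadCircuitsFile() then returns one list of lines per circuit.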
def viewCircuit(self):
if self.CircuitList.currentItem():
img = Image.open('user_files/'+str(self.CircuitList.currentItem().text())+".png")
#print('user_files/'+str(self.CircuitList.currentItem().text())+".png")
img.show()
def SaveLabel(self):
item = self.CircuitList.currentItem()
self.saveImageDialog()
    # When the circuits are developed using the boolean expression,
    #this function creates the list of the circuits by reading the
    # txt file of the circuits. It first reads the number of
    #circuits in the txt file and then creates that many entries in the
    #Circuit list available on the main window
def CreateCircuitList(self):
circuits = self.ReadCircuitsFile()
if len(self.checkList) > 0:
self.CircuitList.clear()
self.checkList.clear()
for CircuitIndex in range(CountFiles()):
self.CircuitList.addItem("Circuit "+str(CircuitIndex+1)+" Logic")
self.CircuitList.addItem("Circuit "+str(CircuitIndex+1)+" SBOL Visual")
self.checkList.append("Check")
else:
for CircuitIndex in range(CountFiles()):
self.CircuitList.addItem("Circuit "+str(CircuitIndex+1)+" Logic")
self.CircuitList.addItem("Circuit "+str(CircuitIndex+1)+" SBOL Visual")
self.checkList.append("Check")
#Code for importing a file in Notes
def FileOpenDialog(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
UserfileName, _ = QFileDialog.getOpenFileName(self,"Import File to Notes", "","All Files (*);;TxtFiles (*.txt)", options=options)
if UserfileName:
f = open(UserfileName,"r")
data = f.read()
self.Notes.setText(data)
    # When the circuits are developed using the boolean expression,
    #this function creates the list of XML files of the
    #generated circuits by reading the
    # txt file of the circuits. It first reads the number of
    #circuits in the txt file and then creates that many entries in the
    #SBOL file list available on the main window. The user can click on a
    #file and save it for later use
def CreateXMLList(self):
circuits = self.ReadCircuitsFile()
if len(self.checkxmlList) > 0:
self.xmlList.clear()
self.checkxmlList.clear()
for CircuitIndex in range(CountFiles()):
self.xmlList.addItem("SBOL File "+str(CircuitIndex+1))
self.checkxmlList.append("Check")
else:
for CircuitIndex in range(CountFiles()):
self.xmlList.addItem("SBOL File "+str(CircuitIndex+1))
self.checkxmlList.append("Check")
    #This function is created to save the xml file for the generated circuits.
    #Upon clicking, this function opens a save dialog and asks the user to enter a
    #UserfileName (the name with which the user wants to save the file). It checks if
    #a file with the same name already exists. If yes, then it asks for replacement.
    #If not, then it creates a file with the given name. Hereafter, it opens the file
    #with the same name as the name clicked on the list, reads it and copies the
    #whole data to the newly created file.
def FileSaveDialog(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
UserfileName, _ = QFileDialog.getSaveFileName(self,"Save SBOL File","","All Files (*);;XML Files (*.xml)", options=options)
if UserfileName:
fileName = UserfileName.split("/")[-1]
if (":" in fileName) or ("?" in fileName) or ("/" in fileName) or ("*" in fileName) or ("<" in fileName) or (">" in fileName) or ("|" in fileName) or ('"' in fileName):
QMessageBox.about(self, "Alert", "A file name can't contain any of the following \n \ / : * ? < > |")
else:
                item = self.xmlList.currentItem()
                # Use context managers so both files are closed after copying
                with open(str(item.text()) + ".xml") as fo, open(UserfileName, "w+") as f:
                    for i in fo:
                        f.write(i)
def ReadXMLFile(self):
item = self.xmlList.currentItem()
file = item.text()
f = open(str(file)+".xml","r")
data = f.read()
self.Notes.setText(data)
    #This function saves the text from the Notes tab on the main window
def SaveNotes(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
InputFile, _ = QFileDialog.getSaveFileName(self,"Save Notes","","All Files (*);;Txt Files (*.txt)", options=options)
Text = self.Notes.toPlainText()
        if InputFile:
            # The dialog filters on .txt, so save the notes with a .txt extension
            with open(InputFile + ".txt", "w+") as f:
                f.write(Text)
    #This function is created to save an image file for the generated circuits.
    #Upon clicking, this function opens a save dialog and asks the user to enter a
    #UserfileName (the name with which the user wants to save the file). It checks if
    #a file with the same name already exists. If yes, then it asks for replacement.
    #If not, then it creates a file with the given name. Hereafter, it opens the image
    #with the same name as the name clicked on the list, reads it and saves that
    #image as a newly created file.
def saveImageDialog(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
UserfileName, _ = QFileDialog.getSaveFileName(self,"Save Image File","","All Files (*);;Image Files (*.png)", options=options)
if UserfileName:
fileName = UserfileName.split("/")[-1]
if (":" in fileName) or ("?" in fileName) or ("/" in fileName) or ("*" in fileName) or ("<" in fileName) or (">" in fileName) or ("|" in fileName) or ('"' in fileName):
QMessageBox.about(self, "Alert", "A file name can't contain any of the following \n \ / : * ? < > |")
else:
item = self.CircuitList.currentItem() #the selected item
saveimg = Image.open('user_files/'+str(item.text())+".png") #use this image to save
saveimg.save(str(UserfileName)+".png") #save image as
#or "?" in UserfileName or "/" in UserfileName or "*" in UserfileName or "<" in UserfileName or ">" in UserfileName or "|" in UserfileName | |
non-random) and random documents do not overlap.
        * The range is doubled so that it's possible to vary elements in a new
array.
* The left side of range is randomly shifted.
Here is an example of a new random array for seq_id=7, total 100
documents and 10 elements in array:
1) offset is set to 1000.
2) offset is incremented by 140.
3) offset is incremented by a random number (e.g., 5).
4) [1145, 1146, 1147, 1148, 1149, 1150, 1151, 1152, 1153, 1154]
array is generated.
Steps for seq_id=8 are the following:
1) offset is set to 1000.
2) offset is incremented by 160.
3) offset is incremented by a random number (e.g., 2).
4) [1162, 1163, 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1171]
array is generated.
"""
offset = seq_id * self.array_size
if self.is_random:
offset = self.num_docs * self.array_size
offset += 2 * seq_id * self.array_size
offset += random.randint(1, self.array_size)
return [int(offset + i) for i in range(self.array_size)]
def build_achievements2(self, seq_id: int) -> List[int]:
"""Build an array of integers.
        achievements2 is very similar to achievements1. However, in the case of
        achievements2 the ranges overlap so that multiple documents can satisfy the
        same queries. Overlapping is achieved by integer division using the
        ARRAY_CAP constant.
"""
offset = seq_id // self.ARRAY_CAP * self.ARRAY_SIZE
if self.is_random:
offset = self.num_docs * self.ARRAY_SIZE
offset += (2 * seq_id) // self.ARRAY_CAP * self.ARRAY_SIZE
offset += random.randint(1, self.ARRAY_SIZE)
return [int(offset + i) for i in range(self.ARRAY_SIZE)]
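    # Illustrative note (not from the original source): in the non-random case,
    # with ARRAY_CAP=100 and ARRAY_SIZE=10, seq_ids 0..99 all get offset 0 and so
    # share the array [0..9]; seq_ids 100..199 share [10..19], and so on, which is
    # what lets multiple documents satisfy the same array query.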
def next(self, key: Key) -> dict:
alphabet = self.build_alphabet(key.string)
size = self._size()
return {
'name': self.build_name(alphabet),
'email': self.build_email(alphabet),
'alt_email': self.build_alt_email(alphabet),
'street': self.build_street(alphabet),
'city': self.build_city(alphabet),
'county': self.build_county(alphabet),
'state': self.build_state(alphabet),
'full_state': self.build_full_state(alphabet),
'country': self.build_country(alphabet),
'realm': self.build_realm(alphabet),
'coins': self.build_coins(alphabet),
'category': self.build_category(alphabet),
'achievements1': self.build_achievements1(key.number + 1),
'achievements2': self.build_achievements2(key.number + 1),
'gmtime': self.build_gmtime(alphabet),
'year': self.build_year(alphabet),
'body': self.build_string(alphabet, size),
'capped_small': self.build_capped(alphabet, key.number, 100),
'topics': self.build_topics(key.number),
}
class ArrayIndexingUniqueDocument(ReverseLookupDocument):
def __init__(self, avg_size: int, prefix: str, array_size: int, num_docs: int):
super().__init__(avg_size, prefix)
self.array_size = array_size
self.num_docs = num_docs
def build_achievements1(self, seq_id: int) -> List[int]:
offset = seq_id * self.array_size
if self.is_random:
offset = self.num_docs * self.array_size
offset += 2 * seq_id * self.array_size
offset += random.randint(1, self.array_size)
return [int(offset + i) for i in range(self.array_size)]
def next(self, key: Key) -> dict:
alphabet = self.build_alphabet(key.string)
size = self._size()
return {
'name': self.build_name(alphabet),
'email': self.build_email(alphabet),
'alt_email': self.build_alt_email(alphabet),
'street': self.build_street(alphabet),
'city': self.build_city(alphabet),
'county': self.build_county(alphabet),
'state': self.build_state(alphabet),
'full_state': self.build_full_state(alphabet),
'country': self.build_country(alphabet),
'realm': self.build_realm(alphabet),
'coins': self.build_coins(alphabet),
'category': self.build_category(alphabet),
'achievements1': self.build_achievements1(key.number + 1),
'gmtime': self.build_gmtime(alphabet),
'year': self.build_year(alphabet),
'body': self.build_string(alphabet, size),
'capped_small': self.build_capped(alphabet, key.number, 100),
'topics': self.build_topics(key.number),
}
class ArrayIndexingRangeScanDocument(ReverseLookupDocument):
ARRAY_CAP = 100
ARRAY_SIZE = 10
OVERHEAD = 530
def __init__(self, avg_size: int, prefix: str, array_size: int, num_docs: int):
super().__init__(avg_size, prefix)
self.array_size = array_size
self.num_docs = num_docs
def build_achievements2(self, seq_id: int) -> List[int]:
offset = seq_id // self.ARRAY_CAP * self.array_size
if self.is_random:
offset = self.num_docs * self.array_size
offset += (2 * seq_id) // self.ARRAY_CAP * self.array_size
offset += random.randint(1, self.array_size)
return [int(offset + i) for i in range(self.array_size)]
def next(self, key: Key) -> dict:
alphabet = self.build_alphabet(key.string)
size = self._size()
return {
'name': self.build_name(alphabet),
'email': self.build_email(alphabet),
'alt_email': self.build_alt_email(alphabet),
'street': self.build_street(alphabet),
'city': self.build_city(alphabet),
'county': self.build_county(alphabet),
'state': self.build_state(alphabet),
'full_state': self.build_full_state(alphabet),
'country': self.build_country(alphabet),
'realm': self.build_realm(alphabet),
'coins': self.build_coins(alphabet),
'category': self.build_category(alphabet),
'achievements2': self.build_achievements2(key.number + 1),
'gmtime': self.build_gmtime(alphabet),
'year': self.build_year(alphabet),
'body': self.build_string(alphabet, size),
'capped_small': self.build_capped(alphabet, key.number, 100),
'topics': self.build_topics(key.number),
}
class ProfileDocument(ReverseLookupDocument):
OVERHEAD = 390
def build_capped(self, *args):
capped = super().build_capped(*args)
return capped.replace('_', '')
def build_zip(self, seq_id: int) -> str:
if self.is_random:
zip_code = random.randint(70000, 90000)
else:
zip_code = 70000 + seq_id % 20000
return str(zip_code)
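# Illustrative sketch: in the deterministic branch the zip code always lands in
# [70000, 89999] and cycles with seq_id, e.g. seq_id=25000 maps to '75000'.
assert str(70000 + 25000 % 20000) == '75000'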
def build_long_street(self, alphabet: str, seq_id: int, capped_small: str,
capped_large: str) -> str:
if self.is_random:
num = random.randint(0, 1000)
idx = random.randint(0, NUM_STREET_SUFFIXES - 1)
else:
num = seq_id % 5000
idx = alphabet.find('7') % NUM_STREET_SUFFIXES
suffix = STREET_SUFFIX[idx]
return '%d %s %s %s' % (num, capped_small, capped_large, suffix)
def next(self, key: Key) -> dict:
alphabet = self.build_alphabet(key.string)
size = self._size()
category = self.build_category(alphabet) + 1
capped_large = self.build_capped(alphabet, key.number, 1000 * category)
capped_small = self.build_capped(alphabet, key.number, 10)
return {
'first_name': self.build_name(alphabet),
'last_name': self.build_street(alphabet),
'email': self.build_email(alphabet),
'balance': self.build_coins(alphabet),
'date': {
'gmtime': self.build_gmtime(alphabet),
'year': self.build_year(alphabet),
},
'capped_large': capped_large,
'address': {
'street': self.build_long_street(alphabet,
key.number,
capped_small,
capped_large),
'city': self.build_city(alphabet),
'county': self.build_county(alphabet),
'state': self.build_state(alphabet),
'zip': self.build_zip(key.number),
'realm': self.build_realm(alphabet),
},
'body': self.build_string(alphabet, size),
}
class ImportExportDocument(ReverseLookupDocument):
"""Extend ReverseLookupDocument by adding 25 fields with random size."""
OVERHEAD = 1022
def next(self, key: Key) -> dict:
alphabet = self.build_alphabet(key.string)
size = self._size()
return {
'name': self.build_name(alphabet) * random.randint(0, 5),
'email': self.build_email(alphabet) * random.randint(0, 5),
'alt_email': self.build_alt_email(
alphabet) * random.randint(0, 5),
'street': self.build_street(alphabet) * random.randint(0, 9),
'city': self.build_city(alphabet) * random.randint(0, 9),
'county': self.build_county(alphabet) * random.randint(0, 5),
'state': self.build_state(alphabet) * random.randint(0, 5),
'full_state': self.build_full_state(
alphabet) * random.randint(0, 5),
'country': self.build_country(
alphabet) * random.randint(0, 5),
'realm': self.build_realm(
alphabet) * random.randint(0, 9),
'alt_street': self.build_street(
alphabet) * random.randint(0, 9),
'alt_city': self.build_city(
alphabet) * random.randint(0, 9),
'alt_county': self.build_county(
alphabet) * random.randint(0, 5),
'alt_state': self.build_state(
alphabet) * random.randint(0, 5),
'alt_full_state': self.build_full_state(
alphabet) * random.randint(0, 5),
'alt_country': self.build_country(
alphabet) * random.randint(0, 5),
'alt_realm': self.build_realm(
alphabet) * random.randint(0, 9),
'coins': self.build_coins(
alphabet) * random.randint(0, 999),
'category': self.build_category(
alphabet) * random.randint(0, 5),
'achievements': self.build_achievements(alphabet),
'gmtime': self.build_gmtime(alphabet) * random.randint(0, 9),
'year': self.build_year(alphabet) * random.randint(0, 5),
'body': self.build_string(alphabet, size),
'capped_small': self.build_capped(
alphabet, key.number, 100) * random.randint(0, 5),
'alt_capped_small': self.build_capped(
alphabet, key.number, 100) * random.randint(0, 5),
}
class ImportExportDocumentArray(ImportExportDocument):
"""Extend ImportExportDocument by adding array docs.
The documents contain 25 top-level fields with variable-size arrays.
"""
OVERHEAD = 0
def _random_array(self, value: str, num: int):
if value == '':
return []
if len(value) < num:
return [value] * 5
scope = sorted(random.sample(range(len(value)), num))
result = [value[0 if i == 0 else scope[i - 1]:i + scope[i]] for i in range(num)]
return result
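# Illustrative sketch (hypothetical helper): the same slicing as _random_array
# but with the random cut points replaced by fixed ones, showing how the input
# string is chopped into `num` overlapping, variable-length chunks.
def _random_array_example(value='abcdefghij', scope=(1, 3, 5, 7, 9)):
    num = len(scope)
    return [value[0 if i == 0 else scope[i - 1]:i + scope[i]] for i in range(num)]

assert _random_array_example() == ['a', 'bcd', 'defg', 'fghij', 'hij']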
def next(self, key: Key) -> dict:
alphabet = self.build_alphabet(key.string)
size = self._size()
# 25 Fields of random size. Have an array with at least 10 items in five fields.
return {
'name': self._random_array(self.build_name(
alphabet) * random.randint(0, 9), 5),
'email': self.build_email(
alphabet) * random.randint(0, 5),
'alt_email': self.build_alt_email(
alphabet) * random.randint(0, 9),
'street': self._random_array(self.build_street(
alphabet) * random.randint(0, 9), 5),
'city': self._random_array(self.build_city(
alphabet) * random.randint(0, 9), 5),
'county': self._random_array(self.build_county(
alphabet) * random.randint(0, 9), 5),
'state': self._random_array(self.build_state(
alphabet) * random.randint(0, 9), 5),
'full_state': self._random_array(self.build_full_state(
alphabet) * random.randint(0, 9), 5),
'country': self._random_array(self.build_country(
alphabet) * random.randint(0, 9), 5),
'realm': self.build_realm(alphabet) * random.randint(0, 9),
'alt_street': self._random_array(self.build_street(
alphabet) * random.randint(0, 9), 5),
'alt_city': self._random_array(self.build_city(
alphabet) * random.randint(0, 9), 5),
'alt_county': self.build_county(
alphabet) * random.randint(0, 9),
'alt_state': self.build_state(
alphabet) * random.randint(0, 9),
'alt_full_state': self.build_full_state(
alphabet) * random.randint(0, 9),
'alt_country': self.build_country(
alphabet) * random.randint(0, 9),
'alt_realm': self.build_realm(
alphabet) * random.randint(0, 9),
'coins': self.build_coins(
alphabet) * random.randint(0, 999),
'category': self.build_category(
alphabet) * random.randint(0, 9),
'achievements': self.build_achievements(alphabet),
'gmtime': self.build_gmtime(alphabet) * random.randint(0, 9),
'year': self.build_year(alphabet) * random.randint(0, 5),
'body': self._random_array(self.build_string(alphabet, size), 7),
'capped_small': self.build_capped(
alphabet, key.number, 100) * random.randint(0, 5),
'alt_capped_small': self.build_capped(
alphabet, key.number, 100) * random.randint(0, 5),
}
class ImportExportDocumentNested(ImportExportDocument):
"""Extend ImportExportDocument by adding nested docs.
The documents contain 25 top-level fields (5 nested sub-documents).
"""
def next(self, key: Key) -> dict:
alphabet = self.build_alphabet(key.string)
size = self._size()
return {
'name': {'n': {'a': {'m': {'e': self.build_name(
alphabet) * random.randint(0, 3)}}}},
'email': {'e': {'m': {'a': {'i': self.build_email(
alphabet) * random.randint(0, 3)}}}},
'alt_email': {'a': {'l': {'t': {'e': self.build_alt_email(
alphabet) * random.randint(0, 3)}}}},
'street': {'s': {'t': {'r': {'e': self.build_street(
alphabet) * random.randint(0, 3)}}}},
'city': {'c': {'i': {'t': {'y': self.build_city(
alphabet) * random.randint(0, 3)}}}},
'county': {'c': {'o': {'u': {'n': self.build_county(
alphabet) * random.randint(0, 3)}}}},
'state': {'s': {'t': {'a': {'t': self.build_state(
alphabet) * random.randint(0, 3)}}}},
'full_state': {'f': {'u': {'l': {'l': self.build_full_state(
alphabet) * random.randint(0, 3)}}}},
'country': {'c': {'o': {'u': {'n': self.build_country(
alphabet) * random.randint(0, 3)}}}},
'realm': {'r': {'e': {'a': {'l': self.build_realm(
alphabet) * random.randint(0, 3)}}}},
'alt_street': {'a': {'l': {'t': {'s': self.build_street(
alphabet) * random.randint(0, 3)}}}},
'alt_city': {'a': {'l': {'t': {'c': self.build_city(
alphabet) * random.randint(0, 3)}}}},
'alt_county': {'e': {'m': {'a': {'i': self.build_county(
alphabet) * random.randint(0, 3)}}}},
'alt_state': {'e': {'m': {'a': {'i': self.build_state(
alphabet) * random.randint(0, 3)}}}},
'alt_full_state': {'e': {'m': {'a': {'i': | |
Modules for Ray Based Renderer ##################################################
####################################################################################################################################
class RaySampler(nn.Module):
def __init__(self, num_azi, num_polar, interval_polar = 5, mode = 'reflect'):
super().__init__()
self.register_buffer('num_azi', torch.tensor(num_azi))
self.register_buffer('num_polar', torch.tensor(num_polar))
self.register_buffer('interval_polar', torch.tensor(interval_polar))
self.mode = mode
roty_rad = np.arange(1, num_polar + 1) * interval_polar * np.pi / 180.0
rotz_rad = np.arange(num_azi) * 2 * np.pi / num_azi
roty_rad, rotz_rad = np.meshgrid(roty_rad, rotz_rad, sparse = False)
roty_rad = roty_rad.flatten()
rotz_rad = rotz_rad.flatten()
rotx_rad = np.zeros_like(roty_rad)
self.rot_rad = np.vstack((rotx_rad, roty_rad, rotz_rad)) # [3, num_ray]
self.num_ray = self.rot_rad.shape[1] + 1
Rs = np.zeros((self.num_ray, 3, 3), dtype = np.float32)
Rs[0, :, :] = np.eye(3)
for i in range(self.num_ray - 1):
Rs[i + 1, :, :] = euler_to_rot(self.rot_rad[:, i])
self.register_buffer('Rs', torch.from_numpy(Rs)) # [num_ray, 3, 3]
# pivots in tangent space
pivots_dir = torch.matmul(self.Rs, torch.FloatTensor([0, 0, 1], device = self.Rs.device)[:, None])[..., 0].permute((1, 0)) # [3, num_ray]
self.register_buffer('pivots_dir', pivots_dir)
def forward(self, TBN_matrices, view_dir_map_tangent, alpha_map):
'''
TBN_matrices: [N, ..., 3, 3]
view_dir_map_tangent: [N, ..., 3]
alpha_map: [N, ..., 1]
return0: [N, ..., 3, num_ray]
return1: [N, ..., 2, num_ray]
return2: [N, ..., 3, num_ray]
'''
if self.mode == 'reflect':
# reflect view directions around pivots
rays_dir_tangent = camera.get_reflect_dir(view_dir_map_tangent[..., None], self.pivots_dir, dim = -2) * alpha_map[..., None] # [N, ..., 3, num_ray]
# transform to world space
num_ray = rays_dir_tangent.shape[-1]
rays_dir = torch.matmul(TBN_matrices.reshape((-1, 3, 3)), rays_dir_tangent.reshape((-1, 3, num_ray))).reshape((*(TBN_matrices.shape[:-1]), -1)) # [N, ..., 3, num_ray]
else:
rays_dir_tangent = self.pivots_dir # [3, num_ray]
# transform to world space
num_ray = rays_dir_tangent.shape[-1]
rays_dir = torch.matmul(TBN_matrices.reshape((-1, 3, 3)), rays_dir_tangent).reshape((*(TBN_matrices.shape[:-1]), -1)) # [N, ..., 3, num_ray]
rays_dir = torch.nn.functional.normalize(rays_dir, dim = -2)
# get rays uv on light probe
rays_uv = render.spherical_mapping_batch(rays_dir.transpose(1, -2)).transpose(1, -2) # [N, ..., 2, num_ray]
rays_uv = rays_uv * alpha_map[..., None] - (alpha_map[..., None] == 0).to(rays_dir.dtype) # [N, ..., 2, num_ray]
return rays_dir, rays_uv, rays_dir_tangent
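# Hedged sketch of the ray layout RaySampler builds in __init__: num_azi
# azimuthal steps times num_polar polar steps (interval_polar degrees apart),
# plus one identity rotation, gives num_ray pivot directions in tangent space.
import numpy as np  # already imported by this module; repeated here for clarity
num_azi_demo, num_polar_demo, interval_polar_demo = 8, 4, 5
roty_demo = np.arange(1, num_polar_demo + 1) * interval_polar_demo * np.pi / 180.0
rotz_demo = np.arange(num_azi_demo) * 2 * np.pi / num_azi_demo
roty_demo, rotz_demo = np.meshgrid(roty_demo, rotz_demo, sparse=False)
assert roty_demo.size + 1 == num_azi_demo * num_polar_demo + 1  # 33 rays here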
class RayRenderer(nn.Module):
def __init__(self, lighting_model, interpolater):
super().__init__()
self.lighting_model = lighting_model
self.interpolater = interpolater
def forward(self, albedo_specular, rays_uv, rays_lt, lighting_idx = None, lp = None, albedo_diffuse = None, num_ray_diffuse = 0, no_albedo = False, seperate_albedo = False, lp_scale_factor = 1):
'''
rays_uv: [N, H, W, 2, num_ray]
rays_lt: [N, num_ray, C, H, W]
albedo_specular: [N, C, H, W]
albedo_diffuse: [N, C, H, W]
return: [N, C, H, W]
'''
num_ray = rays_uv.shape[-1] - num_ray_diffuse
# get light probe
if lp is None:
lp = self.lighting_model(lighting_idx, is_lp = True) # [N, H, W, C]
lp = lp * lp_scale_factor
# get rays color
rays_color = self.interpolater(lp, (rays_uv[..., 0, :] * float(lp.shape[2])).clamp(max = lp.shape[2] - 1), (rays_uv[..., 1, :] * float(lp.shape[1])).clamp(max = lp.shape[1] - 1)).permute((0, -2, -1, 1, 2)) # [N, num_ray, C, H, W]
# get specular light transport map
ltt_specular_map = (rays_lt[:, :num_ray, ...] * rays_color[:, :num_ray, ...]).sum(1) / num_ray # [N, C, H, W]
# get specular component
if no_albedo:
out_specular = ltt_specular_map
else:
out_specular = albedo_specular * ltt_specular_map
if num_ray_diffuse > 0:
# get diffuse light transport map
ltt_diffuse_map = (rays_lt[:, num_ray:, ...] * rays_color[:, num_ray:, ...]).sum(1) / num_ray_diffuse # [N, C, H, W]
# get diffuse component
if no_albedo:
out_diffuse = ltt_diffuse_map
else:
if seperate_albedo:
out_diffuse = albedo_diffuse * ltt_diffuse_map
else:
out_diffuse = albedo_specular * ltt_diffuse_map
else:
ltt_diffuse_map = torch.zeros_like(ltt_specular_map)
out_diffuse = torch.zeros_like(out_specular)
if out_diffuse is not None:
out = out_specular + out_diffuse
else:
out = out_specular
return out, out_specular, out_diffuse, ltt_specular_map, ltt_diffuse_map, rays_color, lp
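# Hedged sketch of the shading done in RayRenderer.forward: the specular output
# is the albedo times the per-ray product of light transport and light-probe
# colour, averaged over the rays (the diffuse rays are treated the same way).
def _ray_render_example():
    import torch  # already imported by this module; repeated for self-containment
    N, num_ray, C, H, W = 1, 4, 3, 8, 8
    rays_lt = torch.rand(N, num_ray, C, H, W)     # light transport per ray
    rays_color = torch.rand(N, num_ray, C, H, W)  # light-probe colour per ray
    albedo = torch.rand(N, C, H, W)
    ltt_specular = (rays_lt * rays_color).sum(1) / num_ray  # [N, C, H, W]
    return albedo * ltt_specular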
##########################################################################################################################
################################################## Modules for Lighting ##################################################
##########################################################################################################################
# Spherical Harmonics model
class LightingSH(nn.Module):
def __init__(self, l_dir, lmax, num_lighting = 1, num_channel = 3, init_coeff = None, fix_params = False, lp_recon_h = 100, lp_recon_w = 200):
'''
l_dir: torch.Tensor, [3, num_sample], sampled light directions
lmax: int, maximum SH degree
num_lighting: int, number of lighting
num_channel: int, number of color channels
init_coeff: torch.Tensor, [num_lighting, num_basis, num_channel] or [num_basis, num_channel], initial coefficients
fix_params: bool, whether fix parameters
'''
super().__init__()
self.num_sample = l_dir.shape[1]
self.lmax = lmax
self.num_basis = (lmax + 1) ** 2
self.num_lighting = num_lighting
self.num_channel = num_channel
self.fix_params = fix_params
self.lp_recon_h = lp_recon_h
self.lp_recon_w = lp_recon_w
# get basis value on sampled directions
print('LightingSH.__init__: Computing SH basis value on sampled directions...')
basis_val = torch.from_numpy(sph_harm.evaluate_sh_basis(lmax = lmax, directions = l_dir.cpu().detach().numpy().transpose())).to(l_dir.dtype).to(l_dir.device)
self.register_buffer('basis_val', basis_val) # [num_sample, num_basis]
# basis coefficients as learnable parameters
self.coeff = nn.Parameter(torch.zeros((num_lighting, self.num_basis, num_channel), dtype = torch.float32)) # [num_lighting, num_basis, num_channel]
# initialize basis coeffients
if init_coeff is not None:
if init_coeff.dim == 2:
init_coeff = init_coeff[None, :].repeat((num_lighting, 1, 1))
self.coeff.data = init_coeff
# change to non-learnable
if self.fix_params:
self.coeff.requires_grad_(False)
# precompute light samples
l_samples = sph_harm.reconstruct_sh(self.coeff.data, self.basis_val)
self.register_buffer('l_samples', l_samples) # [num_lighting, num_sample, num_channel]
# precompute SH basis value for reconstructing light probe
lp_samples_recon_v, lp_samples_recon_u = torch.meshgrid([torch.arange(start = 0, end = self.lp_recon_h, step = 1, dtype = torch.float32) / (self.lp_recon_h - 1),
torch.arange(start = 0, end = self.lp_recon_w, step = 1, dtype = torch.float32) / (self.lp_recon_w - 1)])
lp_samples_recon_uv = torch.stack([lp_samples_recon_u, lp_samples_recon_v]).flatten(start_dim = 1, end_dim = -1)
lp_samples_recon_dir = render.spherical_mapping_inv(lp_samples_recon_uv).permute((1, 0)).cpu().detach().numpy()
basis_val_recon = torch.from_numpy(sph_harm.evaluate_sh_basis(lmax = self.lmax, directions = lp_samples_recon_dir)).to(l_dir.dtype).to(l_dir.device)
self.register_buffer('basis_val_recon', basis_val_recon) # [num_lp_pixel, num_basis]
def forward(self, lighting_idx = None, coeff = None, is_lp = None):
'''
coeff: torch.Tensor, [num_lighting, num_basis, num_channel]
return: [1, num_lighting, num_sample, num_channel] or [1, num_sample, num_channel]
'''
if coeff is not None:
if is_lp:
out = self.reconstruct_lp(coeff)[None, :] # [1, num_lighting, H, W, C]
else:
out = sph_harm.reconstruct_sh(coeff, self.basis_val)[None, :]
elif lighting_idx is not None:
if is_lp:
out = self.reconstruct_lp(self.coeff[lighting_idx, :])[None, :] # [1, H, W, C]
else:
if self.fix_params:
out = self.l_samples[lighting_idx, ...][None, :]
else:
out = sph_harm.reconstruct_sh(self.coeff[lighting_idx, ...][None, :], self.basis_val)
else:
if is_lp:
out = self.reconstruct_lp(self.coeff)[None, :] # [1, num_lighting, H, W, C]
else:
if self.fix_params:
out = self.l_samples[None, :]
else:
out = sph_harm.reconstruct_sh(self.coeff, self.basis_val)[None, :]
return out
def get_lighting_params(self, lighting_idx):
return self.coeff[lighting_idx, :] # [num_sample, num_channel]
def normalize_lighting(self, lighting_ref_idx):
lighting_ref_norm = self.coeff[lighting_ref_idx, :].norm('fro')
norm_scale_factor = lighting_ref_norm / self.coeff.norm('fro', dim = [1, 2])
norm_scale_factor[lighting_ref_idx] = 1.0
self.coeff *= norm_scale_factor[:, None, None]
def reconstruct_lp(self, coeff):
'''
coeff: [num_basis, C] or [num_lighting, num_basis, C]
'''
lp_recon = sph_harm.reconstruct_sh(coeff, self.basis_val_recon).reshape((int(self.lp_recon_h), int(self.lp_recon_w), -1)) # [H, W, C] or [num_lighting, H, W, C]
return lp_recon
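# Hedged sketch of the reconstruction LightingSH relies on: sampled radiance is
# presumably the SH basis values at the sample directions contracted with the
# per-channel coefficients (the actual sph_harm.reconstruct_sh may differ).
def _sh_reconstruct_example():
    import torch
    num_sample, lmax, num_lighting, num_channel = 64, 2, 1, 3
    num_basis = (lmax + 1) ** 2                    # 9 basis functions for lmax=2
    basis_val = torch.rand(num_sample, num_basis)  # stand-in for evaluate_sh_basis
    coeff = torch.rand(num_lighting, num_basis, num_channel)
    l_samples = torch.einsum('sb,lbc->lsc', basis_val, coeff)
    return l_samples.shape                         # [num_lighting, num_sample, num_channel]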
# Light Probe model
class LightingLP(nn.Module):
def __init__(self, l_dir, num_lighting = 1, num_channel = 3, lp_dataloader = None, fix_params = False, lp_img_h = 1600, lp_img_w = 3200):
'''
l_dir: torch.FloatTensor, [3, num_sample], sampled light directions
num_lighting: int, number of lighting
num_channel: int, number of color channels
lp_dataloader: dataloader for light probes (if not None, num_lighting is ignored)
fix_params: bool, whether fix parameters
'''
super().__init__()
self.register_buffer('l_dir', l_dir) # [3, num_sample]
self.num_sample = l_dir.shape[1]
self.num_lighting = num_lighting
self.num_channel = num_channel
self.fix_params = fix_params
self.lp_img_h = lp_img_h
self.lp_img_w = lp_img_w
if lp_dataloader is not None:
self.num_lighting = len(lp_dataloader)
# spherical mapping to get light probe uv
l_samples_uv = render.spherical_mapping(l_dir)
self.register_buffer('l_samples_uv', l_samples_uv) # [2, num_sample]
# light samples as learnable parameters
self.l_samples = nn.Parameter(torch.zeros((self.num_lighting, self.num_sample, self.num_channel), dtype = torch.float32)) # [num_lighting, num_sample, num_channel]
# initialize light samples from light probes
if lp_dataloader is not None:
self.num_lighting = len(lp_dataloader)
lp_idx = 0
lps = []
for lp in lp_dataloader:
lp_img = lp['lp_img'][0, :].permute((1, 2, 0))
lps.append(torch.from_numpy(cv2.resize(lp_img.cpu().detach().numpy(), (lp_img_w, lp_img_h), interpolation = cv2.INTER_AREA))) # [H, W, C]
lp_img = lps[-1]
self.l_samples.data[lp_idx, :] = misc.interpolate_bilinear(lp_img.to(self.l_samples_uv.device), (self.l_samples_uv[None, 0, :] * float(lp_img.shape[1])).clamp(max = lp_img.shape[1] - 1), (self.l_samples_uv[None, 1, :] * float(lp_img.shape[0])).clamp(max = lp_img.shape[0] - 1))[0, :]
lp_idx += 1
lps = torch.stack(lps)
self.register_buffer('lps', lps) # [num_lighting, H, W, C]
# change to non-learnable
if self.fix_params:
self.l_samples.requires_grad_(False)
def forward(self, lighting_idx = None, is_lp = False):
'''
return: [1, num_lighting, num_sample, num_channel] or [1, num_sample, num_channel]
'''
if is_lp:
if lighting_idx is None:
return self.lps[None, :]
else:
return self.lps[lighting_idx, :][None, :]
else:
if lighting_idx is None:
return self.l_samples[None, :]
else:
return self.l_samples[lighting_idx, :][None, :]
def fit_sh(self, lmax):
print('LightingLP.fit_sh: Computing SH basis value on sampled directions...')
basis_val = torch.from_numpy(sph_harm.evaluate_sh_basis(lmax = lmax, directions = self.l_dir.cpu().detach().numpy().transpose())).to(self.l_dir.dtype).to(self.l_dir.device) # [num_sample, num_basis]
sh_coeff = sph_harm.fit_sh_coeff(samples = self.l_samples.to(self.l_dir.device), sh_basis_val = basis_val) # [num_lighting, num_basis, num_channel]
self.register_buffer('sh_coeff', sh_coeff)
return
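# Hedged sketch of what fit_sh does conceptually: given radiance samples and the
# SH basis evaluated at the sample directions, the coefficients are a
# least-squares fit (the real sph_harm.fit_sh_coeff implementation may differ).
def _sh_fit_example():
    import torch
    num_sample, num_basis, num_channel = 64, 9, 3
    basis_val = torch.rand(num_sample, num_basis)
    l_samples = torch.rand(num_sample, num_channel)
    coeff = torch.linalg.lstsq(basis_val, l_samples).solution  # [num_basis, num_channel]
    return coeff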
##########################################################################################################################
################################################## Modules for Aligning ##################################################
##########################################################################################################################
class AlignModule(nn.Module):
def __init__(self, input_channels, ref_channels, mid_channels, out_channels):
super(AlignModule, self).__init__()
self.down_h = | |
# pyjswidgets/pyjslib.PyJS.py
# Copyright 2006 <NAME> and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# iteration from <NAME>'s Iteration in JavaScript
# must declare import _before_ importing sys
# FIXME: dynamic=1, async=False, init=True are useless here (?)
def import_module(path, parent_module, module_name, dynamic=1, async=False, init=True):
module = None
JS("""
@{{module}} = $pyjs['modules_hash'][@{{module_name}}];
if (typeof @{{module}} == 'function' && @{{module}}['__was_initialized__'] == true) {
return null;
}
if (@{{module_name}} == 'sys' || @{{module_name}} == 'pyjslib') {
@{{module}}();
return null;
}
""")
names = module_name.split(".")
importName = ''
# Import all modules in the chain (import a.b.c)
for name in names:
importName += name
JS("""@{{module}} = $pyjs['modules_hash'][@{{importName}}];""")
if isUndefined(module):
raise ImportError("No module named " + importName)
if JS("@{{module}}['__was_initialized__'] != true"):
# Module wasn't initialized
module()
importName += '.'
return None
# FIXME: dynamic=1, async=False are useless here (?). Only dynamic modules
# are loaded with load_module and it's always "async"
@noSourceTracking
def load_module(path, parent_module, module_name, dynamic=1, async=False):
"""
"""
JS("""
var cache_file;
var module = $pyjs['modules_hash'][@{{module_name}}];
if (typeof module == 'function') {
return true;
}
if (!@{{dynamic}}) {
// There's no way we can load a non-dynamic module
return false;
}
if (@{{path}} == null)
{
@{{path}} = './';
}
var override_name = @{{sys}}['platform'] + "." + @{{module_name}};
if (((@{{sys}}['overrides'] != null) &&
(@{{sys}}['overrides']['has_key'](override_name))))
{
cache_file = @{{sys}}['overrides']['__getitem__'](override_name) ;
}
else
{
cache_file = @{{module_name}} ;
}
cache_file = (@{{path}} + cache_file + '.cache.js' ) ;
//alert("cache " + cache_file + " " + module_name + " " + parent_module);
var onload_fn = '';
// this one tacks the script onto the end of the DOM
pyjs_load_script(cache_file, onload_fn, @{{async}});
try {
var loaded = (typeof $pyjs['modules_hash'][@{{module_name}}] == 'function')
} catch ( e ) {
}
if (loaded) {
return true;
}
return false;
""")
@noSourceTracking
def load_module_wait(proceed_fn, parent_mod, module_list, dynamic):
module_list = module_list.getArray()
JS("""
var wait_count = 0;
//var data = '';
//var element = $doc['createElement']("div");
//element['innerHTML'] = '';
//$doc['body']['appendChild'](element);
//function write_dom(txt) {
// element['innerHTML'] += txt;
//}
var timeoutperiod = 1;
if (@{{dynamic}})
var timeoutperiod = 1;
var wait = function() {
wait_count++;
//write_dom(".");
var loaded = true;
for (var i in @{{module_list}}) {
if (typeof $pyjs['modules_hash'][@{{module_list}}[i]] != 'function') {
loaded = false;
break;
}
}
if (!loaded) {
setTimeout(wait, timeoutperiod);
} else {
if (@{{proceed_fn}}['importDone'])
@{{proceed_fn}}['importDone'](@{{proceed_fn}});
else
@{{proceed_fn}}();
//$doc['body']['removeChild'](element);
}
}
//write_dom("Loading modules ");
wait();
""")
class Modload:
# All to-be-imported module names are in app_modlist
# Since we're only _loading_ the modules here, we can do that in almost
# any order. There's one limitation: a child/sub module cannot be loaded
# unless its parent is loaded. It has to be chained in the module list.
# (1) $pyjs.modules.pyjamas
# (2) $pyjs.modules.pyjamas.ui
# (3) $pyjs.modules.pyjamas.ui.Widget
# Therefore, all modules are collected and sorted on the depth (i.e. the
# number of dots in it)
# As long as we don't move on to the next depth unless all modules of the
# previous depth are loaded, we won't run into unchainable modules.
# The execution of the module code is done when the import statement is
# reached, or after loading the modules for the main module.
@noSourceTracking
def __init__(self, path, app_modlist, app_imported_fn, dynamic,
parent_mod):
self.app_modlist = app_modlist
self.app_imported_fn = app_imported_fn
self.path = path
self.dynamic = dynamic
self.parent_mod = parent_mod
self.modules = {}
for modlist in self.app_modlist:
for mod in modlist:
depth = len(mod.split('.'))
if not self.modules.has_key(depth):
self.modules[depth] = []
self.modules[depth].append(mod)
self.depths = self.modules.keys()
self.depths.sort()
self.depths.reverse()
@noSourceTracking
def next(self):
if not self.dynamic:
# All modules are static. Just start the main module.
self.app_imported_fn()
return
depth = self.depths.pop()
# Initiate the loading of the modules.
for app in self.modules[depth]:
load_module(self.path, self.parent_mod, app, self.dynamic, True);
if len(self.depths) == 0:
# This is the last depth. Start the main module after loading these
# modules.
load_module_wait(self.app_imported_fn, self.parent_mod, self.modules[depth], self.dynamic)
else:
# After loading the modules, to the next depth.
load_module_wait(getattr(self, "next"), self.parent_mod, self.modules[depth], self.dynamic)
def get_module(module_name):
ev = "__mod = %s;" % module_name
JS("pyjs_eval(@{{ev}});")
return __mod
def preload_app_modules(path, app_modnames, app_imported_fn, dynamic,
parent_mod=None):
loader = Modload(path, app_modnames, app_imported_fn, dynamic, parent_mod)
loader.next()
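# Hedged sketch of the depth bucketing Modload.__init__ performs: module names
# are grouped by the number of dot-separated components, so parents are always
# loaded before their children (names below are hypothetical).
def _depth_grouping_example():
    mods = ['pyjamas', 'pyjamas.ui', 'pyjamas.ui.Widget', 'sys']
    buckets = {}
    for mod in mods:
        buckets.setdefault(len(mod.split('.')), []).append(mod)
    # {1: ['pyjamas', 'sys'], 2: ['pyjamas.ui'], 3: ['pyjamas.ui.Widget']}
    return buckets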
class BaseException:
message = ''
def __init__(self, *args):
self.args = args
if len(args) == 1:
self.message = args[0]
def __getitem__(self, index):
return self.args.__getitem__(index)
def __str__(self):
if len(self.args) == 0:
return ''
elif len(self.args) == 1:
return str(self.message)
return repr(self.args)
def __repr__(self):
return self.__name__ + repr(self.args)
def toString(self):
return str(self)
class Exception(BaseException):
pass
class StandardError(Exception):
pass
class TypeError(StandardError):
pass
class AttributeError(StandardError):
def toString(self):
return "AttributeError: %s of %s" % (self.args[1], self.args[0])
class NameError(StandardError):
pass
class ValueError(StandardError):
pass
class ImportError(StandardError):
pass
class LookupError(StandardError):
def toString(self):
return self.__name__ + ": " + self.args[0]
class KeyError(LookupError):
def __str__(self):
if len(self.args) == 0:
return ''
elif len(self.args) == 1:
return repr(self.message)
return repr(self.args)
class IndexError(LookupError):
pass
# There seems to be a bug in Chrome when accessing the message
# property, which throws an error
# Hence the declaration of 'var message' and the wrapping in try..catch
def init():
JS("""
pyjslib['_errorMapping'] = function(err) {
if (err instanceof(ReferenceError) || err instanceof(TypeError)) {
var message = ''
try {
message = err['message'];
} catch ( e) {
}
return pyjslib['AttributeError'](message);
}
return err
}
pyjslib['TryElse'] = function () { };
pyjslib['TryElse']['prototype'] = new Error();
pyjslib['TryElse']['__name__'] = 'TryElse';
pyjslib['TryElse']['message'] = 'TryElse';
pyjslib['StopIteration'] = function () { };
pyjslib['StopIteration']['prototype'] = new Error();
pyjslib['StopIteration']['__name__'] = 'StopIteration';
pyjslib['StopIteration']['message'] = 'StopIteration';
pyjslib['String_find'] = function(sub, start, end) {
var pos=this['indexOf'](sub, start);
if (pyjslib['isUndefined'](end)) return pos;
if (pos + sub['length']>end) return -1;
return pos;
}
pyjslib['String_join'] = function(data) {
var text="";
if (pyjslib['isArray'](data)) {
return data['join'](this);
}
else if (pyjslib['isIteratable'](data)) {
var iter=data['__iter__']();
try {
text+=iter['next']();
while (true) {
var item=iter['next']();
text+=this + item;
}
}
catch (e) {
if (e['__name__'] != 'StopIteration') throw e;
}
}
return text;
}
pyjslib['String_isdigit'] = function() {
return (this['match'](/^\d+$/g) != null);
}
pyjslib['String_replace'] = function(old, replace, count) {
var do_max=false;
var start=0;
var new_str="";
var pos=0;
if (!pyjslib['isString'](old)) return this['__replace'](old, replace);
if (!pyjslib['isUndefined'](count)) do_max=true;
while (start<this['length']) {
if (do_max && !count--) break;
pos=this['indexOf'](old, start);
if (pos<0) break;
new_str+=this['substring'](start, pos) + replace;
start=pos+old['length'];
}
if (start<this['length']) new_str+=this['substring'](start);
return new_str;
}
pyjslib['String_split'] = function(sep, maxsplit) {
var items=new pyjslib['List']();
var do_max=false;
var subject=this;
var start=0;
var pos=0;
if (pyjslib['isUndefined'](sep) || pyjslib['isNull'](sep)) {
sep=" ";
subject=subject['strip']();
subject=subject['replace'](/\s+/g, sep);
}
else if (!pyjslib['isUndefined'](maxsplit)) do_max=true;
if (subject['length'] == 0) {
return items;
}
while (start<subject['length']) {
if (do_max && !maxsplit--) break;
pos=subject['indexOf'](sep, start);
if (pos<0) break;
items['append'](subject['substring'](start, pos));
start=pos+sep['length'];
}
if (start<=subject['length']) items['append'](subject['substring'](start));
return items;
}
pyjslib['String___iter__'] = function() {
var i = 0;
var s = this;
return {
'next': function() {
if (i >= s['length']) {
throw pyjslib['StopIteration'];
}
return s['substring'](i++, i, 1);
},
'__iter__': function() {
return this;
}
};
}
pyjslib['String_strip'] = function(chars) {
return this['lstrip'](chars)['rstrip'](chars);
}
pyjslib['String_lstrip'] = function(chars) {
if (pyjslib['isUndefined'](chars)) return this['replace'](/^\s+/, "");
return this['replace'](new RegExp("^[" + chars + "]+"), "");
}
pyjslib['String_rstrip'] = function(chars) {
if (pyjslib['isUndefined'](chars)) return this['replace'](/\s+$/, "");
return this['replace'](new RegExp("[" + chars + "]+$"), "");
}
pyjslib['String_startswith'] = function(prefix, start, end) {
// FIXME: accept tuples as suffix (since 2.5)
if (pyjslib['isUndefined'](start)) start = 0;
if (pyjslib['isUndefined'](end)) end = this['length'];
if ((end - start) < prefix['length']) return false
if (this['substr'](start, prefix['length']) == prefix) return true;
return false;
}
pyjslib['String_endswith'] = function(suffix, start, end) {
// FIXME: accept tuples as suffix (since 2.5)
if (pyjslib['isUndefined'](start)) start = 0;
if (pyjslib['isUndefined'](end)) end = this['length'];
if ((end - start) < suffix['length']) return false
if (this['substr'](end - suffix['length'], suffix['length']) == suffix) return true;
return false;
}
pyjslib['String_ljust'] = function(width, fillchar) {
if (typeof(width) != 'number' ||
parseInt(width) != width) {
throw (pyjslib['TypeError']("an integer is required"));
}
if (pyjslib['isUndefined'](fillchar)) fillchar = ' ';
if (typeof(fillchar) != 'string' ||
fillchar['length'] != 1) {
throw (pyjslib['TypeError']("ljust() argument 2 must be char, | |
= v.replace('_' + vars[n]['kindselector']['kind'], '')
# Again, this will be true if even a single specifier
# has been replaced, see comment above.
is_replaced = len(v) < orig_v_len
if not is_replaced:
if not selected_kind_re.match(v):
v_ = v.split('_')
# In case there are additive parameters
if len(v_) > 1:
v = ''.join(v_[:-1]).lower().replace(v_[-1].lower(), '')
# Currently this will not work for complex numbers.
# There is missing code for extracting a complex number,
# which may be defined in either of these:
# a) (Re, Im)
# b) cmplx(Re, Im)
# c) dcmplx(Re, Im)
# d) cmplx(Re, Im, <prec>)
if isdouble(vars[n]):
tt = list(v)
for m in real16pattern.finditer(v):
tt[m.start():m.end()] = list(
v[m.start():m.end()].lower().replace('d', 'e'))
v = ''.join(tt)
elif iscomplex(vars[n]):
# FIXME complex numbers may also have exponents
if v[0] == '(' and v[-1] == ')':
# FIXME, unused l looks like potential bug
l = markoutercomma(v[1:-1]).split('@,@')
try:
params[n] = eval(v, g_params, params)
except Exception as msg:
params[n] = v
outmess('get_parameters: got "%s" on %s\n' % (msg, repr(v)))
if isstring(vars[n]) and isinstance(params[n], int):
params[n] = chr(params[n])
nl = n.lower()
if nl != n:
params[nl] = params[n]
else:
print(vars[n])
outmess(
'get_parameters:parameter %s does not have value?!\n' % (repr(n)))
return params
def _eval_length(length, params):
if length in ['(:)', '(*)', '*']:
return '(*)'
return _eval_scalar(length, params)
_is_kind_number = re.compile(r'\d+_').match
def _eval_scalar(value, params):
if _is_kind_number(value):
value = value.split('_')[0]
try:
value = str(eval(value, {}, params))
except (NameError, SyntaxError, TypeError):
return value
except Exception as msg:
errmess('"%s" in evaluating %r '
'(available names: %s)\n'
% (msg, value, list(params.keys())))
return value
def analyzevars(block):
global f90modulevars
setmesstext(block)
implicitrules, attrrules = buildimplicitrules(block)
vars = copy.copy(block['vars'])
if block['block'] == 'function' and block['name'] not in vars:
vars[block['name']] = {}
if '' in block['vars']:
del vars['']
if 'attrspec' in block['vars']['']:
gen = block['vars']['']['attrspec']
for n in list(vars.keys()):
for k in ['public', 'private']:
if k in gen:
vars[n] = setattrspec(vars[n], k)
svars = []
args = block['args']
for a in args:
try:
vars[a]
svars.append(a)
except KeyError:
pass
for n in list(vars.keys()):
if n not in args:
svars.append(n)
params = get_parameters(vars, get_useparameters(block))
dep_matches = {}
name_match = re.compile(r'[A-Za-z][\w$]*').match
for v in list(vars.keys()):
m = name_match(v)
if m:
n = v[m.start():m.end()]
try:
dep_matches[n]
except KeyError:
dep_matches[n] = re.compile(r'.*\b%s\b' % (v), re.I).match
for n in svars:
if n[0] in list(attrrules.keys()):
vars[n] = setattrspec(vars[n], attrrules[n[0]])
if 'typespec' not in vars[n]:
if not('attrspec' in vars[n] and 'external' in vars[n]['attrspec']):
if implicitrules:
ln0 = n[0].lower()
for k in list(implicitrules[ln0].keys()):
if k == 'typespec' and implicitrules[ln0][k] == 'undefined':
continue
if k not in vars[n]:
vars[n][k] = implicitrules[ln0][k]
elif k == 'attrspec':
for l in implicitrules[ln0][k]:
vars[n] = setattrspec(vars[n], l)
elif n in block['args']:
outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n' % (
repr(n), block['name']))
if 'charselector' in vars[n]:
if 'len' in vars[n]['charselector']:
l = vars[n]['charselector']['len']
try:
l = str(eval(l, {}, params))
except Exception:
pass
vars[n]['charselector']['len'] = l
if 'kindselector' in vars[n]:
if 'kind' in vars[n]['kindselector']:
l = vars[n]['kindselector']['kind']
try:
l = str(eval(l, {}, params))
except Exception:
pass
vars[n]['kindselector']['kind'] = l
savelindims = {}
if 'attrspec' in vars[n]:
attr = vars[n]['attrspec']
attr.reverse()
vars[n]['attrspec'] = []
dim, intent, depend, check, note = None, None, None, None, None
for a in attr:
if a[:9] == 'dimension':
dim = (a[9:].strip())[1:-1]
elif a[:6] == 'intent':
intent = (a[6:].strip())[1:-1]
elif a[:6] == 'depend':
depend = (a[6:].strip())[1:-1]
elif a[:5] == 'check':
check = (a[5:].strip())[1:-1]
elif a[:4] == 'note':
note = (a[4:].strip())[1:-1]
else:
vars[n] = setattrspec(vars[n], a)
if intent:
if 'intent' not in vars[n]:
vars[n]['intent'] = []
for c in [x.strip() for x in markoutercomma(intent).split('@,@')]:
# Remove spaces so that 'in out' becomes 'inout'
tmp = c.replace(' ', '')
if tmp not in vars[n]['intent']:
vars[n]['intent'].append(tmp)
intent = None
if note:
note = note.replace('\\n\\n', '\n\n')
note = note.replace('\\n ', '\n')
if 'note' not in vars[n]:
vars[n]['note'] = [note]
else:
vars[n]['note'].append(note)
note = None
if depend is not None:
if 'depend' not in vars[n]:
vars[n]['depend'] = []
for c in rmbadname([x.strip() for x in markoutercomma(depend).split('@,@')]):
if c not in vars[n]['depend']:
vars[n]['depend'].append(c)
depend = None
if check is not None:
if 'check' not in vars[n]:
vars[n]['check'] = []
for c in [x.strip() for x in markoutercomma(check).split('@,@')]:
if c not in vars[n]['check']:
vars[n]['check'].append(c)
check = None
if dim and 'dimension' not in vars[n]:
vars[n]['dimension'] = []
for d in rmbadname([x.strip() for x in markoutercomma(dim).split('@,@')]):
star = '*'
if d == ':':
star = ':'
if d in params:
d = str(params[d])
for p in list(params.keys()):
re_1 = re.compile(r'(?P<before>.*?)\b' + p + r'\b(?P<after>.*)', re.I)
m = re_1.match(d)
while m:
d = m.group('before') + \
str(params[p]) + m.group('after')
m = re_1.match(d)
if d == star:
dl = [star]
else:
dl = markoutercomma(d, ':').split('@:@')
if len(dl) == 2 and '*' in dl: # e.g. dimension(5:*)
dl = ['*']
d = '*'
if len(dl) == 1 and not dl[0] == star:
dl = ['1', dl[0]]
if len(dl) == 2:
d, v, di = getarrlen(dl, list(block['vars'].keys()))
if d[:4] == '1 * ':
d = d[4:]
if di and di[-4:] == '/(1)':
di = di[:-4]
if v:
savelindims[d] = v, di
vars[n]['dimension'].append(d)
if 'dimension' in vars[n]:
if isintent_c(vars[n]):
shape_macro = 'shape'
else:
shape_macro = 'shape' # 'fshape'
if isstringarray(vars[n]):
if 'charselector' in vars[n]:
d = vars[n]['charselector']
if '*' in d:
d = d['*']
errmess('analyzevars: character array "character*%s %s(%s)" is considered as "character %s(%s)"; "intent(c)" is forced.\n'
% (d, n,
','.join(vars[n]['dimension']),
n, ','.join(vars[n]['dimension'] + [d])))
vars[n]['dimension'].append(d)
del vars[n]['charselector']
if 'intent' not in vars[n]:
vars[n]['intent'] = []
if 'c' not in vars[n]['intent']:
vars[n]['intent'].append('c')
else:
errmess(
"analyzevars: charselector=%r unhandled." % (d))
if 'check' not in vars[n] and 'args' in block and n in block['args']:
flag = 'depend' not in vars[n]
if flag:
vars[n]['depend'] = []
vars[n]['check'] = []
if 'dimension' in vars[n]:
#/----< no check
i = -1
ni = len(vars[n]['dimension'])
for d in vars[n]['dimension']:
ddeps = [] # dependencies of 'd'
ad = ''
pd = ''
if d not in vars:
if d in savelindims:
pd, ad = '(', savelindims[d][1]
d = savelindims[d][0]
else:
for r in block['args']:
if r not in vars:
continue
if re.match(r'.*?\b' + r + r'\b', d, re.I):
ddeps.append(r)
if d in vars:
if 'attrspec' in vars[d]:
for aa in vars[d]['attrspec']:
if aa[:6] == 'depend':
ddeps += aa[6:].strip()[1:-1].split(',')
if 'depend' in vars[d]:
ddeps = ddeps + vars[d]['depend']
i = i + 1
if d in vars and ('depend' not in vars[d]) \
and ('=' not in vars[d]) and (d not in vars[n]['depend']) \
and l_or(isintent_in, isintent_inout, isintent_inplace)(vars[n]):
vars[d]['depend'] = [n]
if ni > 1:
vars[d]['='] = '%s%s(%s,%s)%s' % (
pd, shape_macro, n, i, ad)
else:
vars[d]['='] = '%slen(%s)%s' % (pd, n, ad)
# /---< no check
if 1 and 'check' not in vars[d]:
if ni > 1:
vars[d]['check'] = ['%s%s(%s,%i)%s==%s'
% (pd, shape_macro, n, i, ad, d)]
else:
vars[d]['check'] = [
'%slen(%s)%s>=%s' % (pd, n, ad, d)]
if 'attrspec' not in vars[d]:
vars[d]['attrspec'] = ['optional']
if ('optional' not in vars[d]['attrspec']) and\
('required' not in vars[d]['attrspec']):
vars[d]['attrspec'].append('optional')
elif d not in ['*', ':']:
#/----< no check
if flag:
if d in vars:
if n not in ddeps:
vars[n]['depend'].append(d)
else:
vars[n]['depend'] = vars[n]['depend'] + ddeps
elif isstring(vars[n]):
length = '1'
if 'charselector' in vars[n]:
if '*' in vars[n]['charselector']:
length = _eval_length(vars[n]['charselector']['*'],
params)
vars[n]['charselector']['*'] = length
elif 'len' in vars[n]['charselector']:
length = _eval_length(vars[n]['charselector']['len'],
params)
del vars[n]['charselector']['len']
vars[n]['charselector']['*'] = length
if not vars[n]['check']:
del vars[n]['check']
if flag and not vars[n]['depend']:
del vars[n]['depend']
if '=' in vars[n]:
if 'attrspec' not in vars[n]:
vars[n]['attrspec'] = []
if ('optional' not in vars[n]['attrspec']) and \
('required' not in vars[n]['attrspec']):
vars[n]['attrspec'].append('optional')
if 'depend' not in vars[n]:
vars[n]['depend'] = []
for v, m in list(dep_matches.items()):
if m(vars[n]['=']):
vars[n]['depend'].append(v)
if not vars[n]['depend']:
del vars[n]['depend']
if isscalar(vars[n]):
vars[n]['='] = _eval_scalar(vars[n]['='], params)
for n in list(vars.keys()):
if n == block['name']: # n is block name
if 'note' in vars[n]:
block['note'] = vars[n]['note']
if block['block'] == 'function':
if 'result' in block | |
flux_lm of fluxed_qubit
nested_mc = q0.instr_nested_MC.get_instr() # device object has no nested MC object, get from qubit object
mc = self.instr_MC.get_instr()
conv_cost_det = det.Function_Detector( get_function=czcf.conventional_CZ_cost_func,
msmt_kw={'device': self, 'FL_LutMan_QR': flux_lm,
'MC': mc,'waveform_name': 'cz_{}'.format(gate),
'qubits': [q0.name, q1.name], 'parked_qubit_seq': parked_seq},
value_names=['Cost function value',
'Conditional phase', 'offset difference', 'missing fraction',
'Q0 phase', 'Park Phase OFF', 'Park Phase ON'],
result_keys=['cost_function_val',
'delta_phi', 'offset_difference', 'missing_fraction',
'single_qubit_phase_0', 'park_phase_off', 'park_phase_on'],
value_units=['a.u.', 'deg', '%', '%', 'deg', 'deg', 'deg'])
# 1D Scan of phase corrections after flux pulse
#value_min = flux_lm.cz_phase_corr_amp_SW()-phase_offset
value_min = getattr(flux_lm, 'cz_phase_corr_amp_' + gate )()-phase_offset_sq
#value_max = flux_lm.cz_phase_corr_amp_SW()+phase_offset
value_max = getattr(flux_lm, 'cz_phase_corr_amp_' + gate )()+phase_offset_sq
label = 'CZ_1D_sweep_phase_corr_{}'.format(gate)
nested_mc.set_sweep_function(getattr(flux_lm, 'cz_phase_corr_amp_' + gate ))
nested_mc.set_sweep_points(np.linspace(value_min, value_max, 10))
nested_mc.set_detector_function(conv_cost_det)
result = nested_mc.run(label)
# Use ch_to_analyze as 4 for single qubit phases ('Q0 phase')
a_obj = ma2.Crossing_Analysis(label=label,
ch_idx='Q0 phase',
target_crossing=0)
crossed_value = a_obj.proc_data_dict['root']
getattr(flux_lm, 'cz_phase_corr_amp_' + gate )(crossed_value)
def calibrate_cz_thetas(self, phase_offset: float = 1,
operation_pairs: list = [(['QNW','QC'],'SE'), (['QNE','QC'],'SW'),
(['QC','QSW','QSE'],'SW'), (['QC','QSE','QSW'],'SE')]):
# Set 'qubits': [q0.name, q1.name] and 'parked_qubit_seq': 'ground'
for operation_tuple in operation_pairs:
pair, gate = operation_tuple
parked_seq = 'ground'
q0 = self.find_instrument(pair[0]) # ramsey qubit (we make this be the fluxed one)
q1 = self.find_instrument(pair[1]) # control qubit
q2 = None
gate = gate
# cf.counter_param(0)
flux_lm = q0.instr_LutMan_Flux.get_instr() # flux_lm of fluxed_qubit
nested_mc = q0.instr_nested_MC.get_instr() # device object has no nested MC object, get from qubit object
mc = self.instr_MC.get_instr()
conv_cost_det = det.Function_Detector( get_function=czcf.conventional_CZ_cost_func,
msmt_kw={'device': self, 'FL_LutMan_QR': flux_lm,
'MC': mc,'waveform_name': 'cz_{}'.format(gate),
'qubits': [q0.name, q1.name], 'parked_qubit_seq': parked_seq},
value_names=['Cost function value',
'Conditional phase', 'offset difference', 'missing fraction',
'Q0 phase', 'Park Phase OFF', 'Park Phase ON'],
result_keys=['cost_function_val',
'delta_phi', 'offset_difference', 'missing_fraction',
'single_qubit_phase_0', 'park_phase_off', 'park_phase_on'],
value_units=['a.u.', 'deg', '%', '%', 'deg', 'deg', 'deg'])
# 1D Scan of phase corrections after flux pulse
value_min = getattr(flux_lm, 'cz_theta_f_' + gate )()-phase_offset
#value_max = flux_lm.cz_phase_corr_amp_SW()+phase_offset
value_max = getattr(flux_lm, 'cz_theta_f_' + gate )()+phase_offset
label = 'CZ_1D_sweep_theta_{}'.format(gate)
nested_mc.set_sweep_function(getattr(flux_lm, 'cz_theta_f_' + gate ))
nested_mc.set_sweep_points(np.linspace(value_min, value_max, 10))
nested_mc.set_detector_function(conv_cost_det)
result = nested_mc.run(label)
# Use ch_to_analyze as 4 for single qubit phases ('Q0 phase')
a_obj = ma2.Crossing_Analysis(label=label,
ch_idx='Conditional phase',
target_crossing=180)
crossed_value = a_obj.proc_data_dict['root']
getattr(flux_lm, 'cz_theta_f_' + gate )(crossed_value)
def calibrate_multi_frequency_fine(self, qubits: list = None, times=None,
artificial_periods: float = None,
MC=None, prepare_for_timedomain=True,
update_T2=False, update_frequency=True,
stepsize: float = None, termination_opt=0,
steps=[1, 1, 3, 10, 30, 100, 300, 1000]):
if qubits is None:
qubits = self.qubits()
if artificial_periods is None:
artificial_periods = 2.5
if stepsize is None:
stepsize = 20e-9
for n in steps:
times = []
for q in qubits:
qub = self.find_instrument(q)
time = np.arange(0, 50 * n * stepsize, n * stepsize)
times.append(time)
label = 'Multi_Ramsey_{}_pulse_sep_'.format(n) + '_'.join(qubits)
a = self.measure_multi_ramsey(qubits=qubits, times=times, MC=MC, GBT=False,
artificial_periods=artificial_periods, label=label,
prepare_for_timedomain=prepare_for_timedomain,
update_frequency=False, update_T2=update_T2)
for q in qubits:
qub = self.find_instrument(q)
freq = a.proc_data_dict['quantities_of_interest'][q]['freq_new']
T2 = a.proc_data_dict['quantities_of_interest'][q]['tau']
fit_error = a.proc_data_dict['{}_fit_res'.format(q)].chisqr
if (times[0][-1] < 2. * T2) and (update_frequency is True):
# Only update the frequency while the Ramsey time span is still well below 2*T2*
qub.freq_qubit(freq)
T2_max = max(a.proc_data_dict['quantities_of_interest'][q]['tau'] for q in qubits)
if times[0][-1] > 2. * T2_max:
# If the last step already exceeds 2*T2*, longer steps will too, so stop here
print('Breaking of measurement because of T2*')
break
return True
########################################################
# other methods
########################################################
def create_dep_graph(self):
dags = []
for qi in self.qubits():
q_obj = self.find_instrument(qi)
if hasattr(q_obj, "_dag"):
dag = q_obj._dag
else:
dag = q_obj.create_dep_graph()
dags.append(dag)
dag = nx.compose_all(dags)
dag.add_node(self.name + " multiplexed readout")
dag.add_node(self.name + " resonator frequencies coarse")
dag.add_node("AWG8 MW-staircase")
dag.add_node("AWG8 Flux-staircase")
# Timing of channels can be done independent of the qubits
# it is on a per frequency per feedline basis so not qubit specific
dag.add_node(self.name + " mw-ro timing")
dag.add_edge(self.name + " mw-ro timing", "AWG8 MW-staircase")
dag.add_node(self.name + " mw-vsm timing")
dag.add_edge(self.name + " mw-vsm timing", self.name + " mw-ro timing")
for edge_L, edge_R in self.qubit_edges():
dag.add_node("Chevron {}-{}".format(edge_L, edge_R))
dag.add_node("CZ {}-{}".format(edge_L, edge_R))
dag.add_edge(
"CZ {}-{}".format(edge_L, edge_R),
"Chevron {}-{}".format(edge_L, edge_R),
)
dag.add_edge(
"CZ {}-{}".format(edge_L, edge_R), "{} cryo dist. corr.".format(edge_L)
)
dag.add_edge(
"CZ {}-{}".format(edge_L, edge_R), "{} cryo dist. corr.".format(edge_R)
)
dag.add_edge(
"Chevron {}-{}".format(edge_L, edge_R),
"{} single qubit gates fine".format(edge_L),
)
dag.add_edge(
"Chevron {}-{}".format(edge_L, edge_R),
"{} single qubit gates fine".format(edge_R),
)
dag.add_edge("Chevron {}-{}".format(edge_L, edge_R), "AWG8 Flux-staircase")
dag.add_edge(
"Chevron {}-{}".format(edge_L, edge_R),
self.name + " multiplexed readout",
)
dag.add_node("{}-{} mw-flux timing".format(edge_L, edge_R))
dag.add_edge(
edge_L + " cryo dist. corr.",
"{}-{} mw-flux timing".format(edge_L, edge_R),
)
dag.add_edge(
edge_R + " cryo dist. corr.",
"{}-{} mw-flux timing".format(edge_L, edge_R),
)
dag.add_edge(
"Chevron {}-{}".format(edge_L, edge_R),
"{}-{} mw-flux timing".format(edge_L, edge_R),
)
dag.add_edge(
"{}-{} mw-flux timing".format(edge_L, edge_R), "AWG8 Flux-staircase"
)
dag.add_edge(
"{}-{} mw-flux timing".format(edge_L, edge_R),
self.name + " mw-ro timing",
)
for qubit in self.qubits():
dag.add_edge(qubit + " ro pulse-acq window timing", "AWG8 MW-staircase")
dag.add_edge(qubit + " room temp. dist. corr.", "AWG8 Flux-staircase")
dag.add_edge(self.name + " multiplexed readout", qubit + " optimal weights")
dag.add_edge(
qubit + " resonator frequency",
self.name + " resonator frequencies coarse",
)
dag.add_edge(qubit + " pulse amplitude coarse", "AWG8 MW-staircase")
for qi in self.qubits():
q_obj = self.find_instrument(qi)
# ensures all references are to the main dag
q_obj._dag = dag
self._dag = dag
return dag
##########################################################################
# private functions
##########################################################################
def _add_parameters(self):
self.add_parameter(
'qubits',
parameter_class=ManualParameter,
initial_value=[],
vals=vals.Lists(elt_validator=vals.Strings())
)
self.add_parameter(
'qubit_edges',
parameter_class=ManualParameter,
docstring="Denotes edges that connect qubits. "
"Used to define the device topology.",
initial_value=[[]],
vals=vals.Lists(elt_validator=vals.Lists(elt_validator=vals.Strings()))
)
self.add_parameter(
'qubits_by_feedline',
parameter_class=ManualParameter,
docstring="Qubits divided by feedline."
"Used to sort qubits for timedomain preparation.",
initial_value=[[]],
vals=vals.Lists(elt_validator=vals.Lists(elt_validator=vals.Strings()))
)
self.add_parameter(
'ro_lo_freq',
unit='Hz',
docstring='Frequency of the common LO for all RO pulses.',
parameter_class=ManualParameter
)
# actually, it should be possible to build the integration
# weights obeying different settings for different
# qubits, but for now we use a fixed common value.
self.add_parameter(
"ro_acq_integration_length",
initial_value=500e-9,
vals=vals.Numbers(min_value=0, max_value=20e6),
parameter_class=ManualParameter,
)
self.add_parameter(
"ro_pow_LO",
label="RO power LO",
unit="dBm",
initial_value=20,
parameter_class=ManualParameter,
)
self.add_parameter(
"ro_acq_averages",
initial_value=1024,
vals=vals.Numbers(min_value=0, max_value=1e6),
parameter_class=ManualParameter,
)
self.add_parameter(
"ro_acq_delay",
unit="s",
label="Readout acquisition delay",
vals=vals.Numbers(min_value=0),
initial_value=0,
parameter_class=ManualParameter,
docstring=(
"The time between the instruction that trigger the"
" readout pulse and the instruction that triggers the "
"acquisition. The positive number means that the "
"acquisition is started after the pulse is send."
),
)
self.add_parameter(
"instr_MC",
label="MeasurementControl",
parameter_class=InstrumentRefParameter,)
self.add_parameter('instr_nested_MC',
label='Nested MeasurementControl',
parameter_class=InstrumentRefParameter)
self.add_parameter(
"instr_VSM",
label="Vector Switch Matrix",
parameter_class=InstrumentRefParameter,
)
self.add_parameter(
"instr_CC",
label="Central Controller",
docstring=(
"Device responsible for controlling the experiment"
" using eQASM generated using OpenQL, in the near"
" future will be the CC_Light."
),
parameter_class=InstrumentRefParameter,
)
for i in range(3): # S17 has 3 feedlines
self.add_parameter(
"instr_acq_{}".format(i), parameter_class=InstrumentRefParameter
)
# Two microwave AWGs are used for S17
self.add_parameter("instr_AWG_mw_0", parameter_class=InstrumentRefParameter)
self.add_parameter("instr_AWG_mw_1", parameter_class=InstrumentRefParameter)
self.add_parameter("instr_AWG_mw_2", parameter_class=InstrumentRefParameter)
self.add_parameter("instr_AWG_mw_3", parameter_class=InstrumentRefParameter)
self.add_parameter("instr_AWG_mw_4", parameter_class=InstrumentRefParameter)
self.add_parameter("instr_AWG_flux_0", parameter_class=InstrumentRefParameter)
self.add_parameter("instr_AWG_flux_1", parameter_class=InstrumentRefParameter)
self.add_parameter("instr_AWG_flux_2", parameter_class=InstrumentRefParameter)
ro_acq_docstr = (
"Determines what type of integration weights to use: "
"\n\t SSB: Single sideband demodulation\n\t"
'optimal: waveforms specified in "RO_acq_weight_func_I" '
'\n\tand "RO_acq_weight_func_Q"'
)
self.add_parameter(
"ro_acq_weight_type",
initial_value="SSB",
vals=vals.Enum("SSB", "optimal","optimal IQ"),
docstring=ro_acq_docstr,
parameter_class=ManualParameter,
)
self.add_parameter(
"ro_acq_digitized",
vals=vals.Bool(),
initial_value=False,
parameter_class=ManualParameter,
)
self.add_parameter(
"cfg_openql_platform_fn",
label="OpenQL platform configuration filename",
parameter_class=ManualParameter,
vals=vals.Strings(),
)
self.add_parameter(
"ro_always_all",
docstring="If true, configures the UHFQC to RO all qubits "
"independent of codeword received.",
parameter_class=ManualParameter,
vals=vals.Bool(),
)
# Timing related parameters
self.add_parameter(
"tim_ro_latency_0",
unit="s",
label="Readout latency 0",
parameter_class=ManualParameter,
initial_value=0,
vals=vals.Numbers(),
)
self.add_parameter(
"tim_ro_latency_1",
unit="s",
label="Readout latency 1",
parameter_class=ManualParameter,
initial_value=0,
vals=vals.Numbers(),
)
self.add_parameter(
"tim_ro_latency_2",
unit="s",
label="Readout latency 2",
parameter_class=ManualParameter,
initial_value=0,
vals=vals.Numbers(),
)
self.add_parameter(
"tim_flux_latency_0",
unit="s",
label="Flux latency 0",
parameter_class=ManualParameter,
initial_value=0,
vals=vals.Numbers(),
)
self.add_parameter(
"tim_flux_latency_1",
unit="s",
label="Flux latency 1",
parameter_class=ManualParameter,
initial_value=0,
vals=vals.Numbers(),
)
self.add_parameter(
"tim_flux_latency_2",
unit="s",
label="Flux latency 2",
parameter_class=ManualParameter,
initial_value=0,
vals=vals.Numbers(),
)
self.add_parameter(
"tim_mw_latency_0",
unit="s",
label="Microwave latency 0",
parameter_class=ManualParameter,
initial_value=0,
vals=vals.Numbers(),
)
self.add_parameter(
"tim_mw_latency_1",
unit="s",
label="Microwave latency 1",
parameter_class=ManualParameter,
initial_value=0,
vals=vals.Numbers(),
)
self.add_parameter(
"tim_mw_latency_2",
unit="s",
label="Microwave latency 2",
parameter_class=ManualParameter,
initial_value=0,
vals=vals.Numbers(),
)
self.add_parameter(
"tim_mw_latency_3",
unit="s",
label="Microwave latency 3",
parameter_class=ManualParameter,
initial_value=0,
vals=vals.Numbers(),
)
self.add_parameter(
"tim_mw_latency_4",
unit="s",
label="Microwave latency 4",
parameter_class=ManualParameter,
initial_value=0,
vals=vals.Numbers(),
)
self.add_parameter(
"dio_map",
docstring="The map between DIO"
" channel number and functionality (ro_x, mw_x, flux_x). "
"From 2020-03-19 on, Requires to be configured by the user in each set up. "
"For convenience here are the mapping for the devices with fixed mappings:\n"
"CCL:\n"
" {\n"
" 'ro_0': 1,\n"
" 'ro_1': | |
import sys
import time
import logging
import binascii
import functools
import idaapi
import idautils
from .api import DisassemblerAPI, DockableShim
from ..qt import *
from ..misc import is_mainthread
logger = logging.getLogger("Lighthouse.API.IDA")
#------------------------------------------------------------------------------
# Utils
#------------------------------------------------------------------------------
def execute_sync(function, sync_type):
"""
Synchronize with the disassembler for safe database access.
Modified from https://github.com/vrtadmin/FIRST-plugin-ida
"""
@functools.wraps(function)
def wrapper(*args, **kwargs):
output = [None]
#
# this inline function definition is technically what will execute
# in the context of the main thread. we use this thunk to capture
# any output the function may want to return to the user.
#
def thunk():
output[0] = function(*args, **kwargs)
return 1
if is_mainthread():
thunk()
else:
idaapi.execute_sync(thunk, sync_type)
# return the output of the synchronized execution
return output[0]
return wrapper
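# Illustrative usage (not part of the original source): execute_sync is normally
# consumed through the IDAAPI.execute_read/execute_write/execute_ui decorators
# defined further below. For example, a helper that must read the IDB from a
# background thread could be wrapped as:
#
#   @IDAAPI.execute_read
#   def collect_function_names():
#       return {ea: idaapi.get_short_name(ea) for ea in idautils.Functions()}
#
# Calling collect_function_names() from any thread then marshals the work onto
# IDA's main thread via idaapi.execute_sync and returns the captured result.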
#------------------------------------------------------------------------------
# Disassembler API
#------------------------------------------------------------------------------
class IDAAPI(DisassemblerAPI):
"""
The IDA implementation of the disassembler API abstraction.
"""
NAME = "IDA"
#
# in IDA 7.0, Hex-Rays refactored the IDA API quite a bit. This
# impacts Lighthouse in a few places, so we use version checks at
    # these junctions to determine which APIs to use (v7.x or v6.x)
#
# search 'USING_IDA7API' in the codebase for example cases
#
USING_IDA7API = bool(idaapi.IDA_SDK_VERSION >= 700)
def __init__(self):
super(IDAAPI, self).__init__()
self._init_version()
def _init_version(self):
# retrieve IDA's version #
disassembler_version = idaapi.get_kernel_version()
major, minor = map(int, disassembler_version.split("."))
# save the version number components for later use
self._version_major = major
self._version_minor = minor
self._version_patch = 0
#--------------------------------------------------------------------------
# Properties
#--------------------------------------------------------------------------
@property
def version_major(self):
return self._version_major
@property
def version_minor(self):
return self._version_minor
@property
def version_patch(self):
return self._version_patch
@property
def headless(self):
return False
#--------------------------------------------------------------------------
# Synchronization Decorators
#--------------------------------------------------------------------------
@staticmethod
def execute_read(function):
return execute_sync(function, idaapi.MFF_READ)
@staticmethod
def execute_write(function):
return execute_sync(function, idaapi.MFF_WRITE)
@staticmethod
def execute_ui(function):
return execute_sync(function, idaapi.MFF_FAST)
#--------------------------------------------------------------------------
# API Shims
#--------------------------------------------------------------------------
def create_rename_hooks(self):
if self.USING_IDA7API:
class RenameHooks(idaapi.IDB_Hooks):
pass
else:
class RenameHooks(idaapi.IDP_Hooks):
pass
return RenameHooks()
def get_database_directory(self):
return idautils.GetIdbDir()
def get_disassembler_user_directory(self):
return idaapi.get_user_idadir()
def get_function_addresses(self):
return list(idautils.Functions())
def get_function_name_at(self, address):
return idaapi.get_short_name(address)
def get_function_raw_name_at(self, function_address):
if self.USING_IDA7API:
return idaapi.get_name(function_address)
return idaapi.get_true_name(idaapi.BADADDR, function_address)
def get_imagebase(self):
return idaapi.get_imagebase()
def get_root_filename(self):
return idaapi.get_root_filename()
def navigate(self, address):
return idaapi.jumpto(address)
def set_function_name_at(self, function_address, new_name):
idaapi.set_name(function_address, new_name, idaapi.SN_NOWARN)
#--------------------------------------------------------------------------
# UI API Shims
#--------------------------------------------------------------------------
def get_disassembly_background_color(self):
"""
Get the background color of the IDA disassembly view.
Since there is no supported way to probe the palette & colors in use by
IDA, we must get creative. This function attempts to locate an IDA
disassembly view, and take a screenshot of said widget. It will then
attempt to extract the color of a single background pixel (hopefully).
"""
if self.USING_IDA7API:
return self._get_ida_bg_color_ida7()
else:
return self._get_ida_bg_color_ida6()
def is_msg_inited(self):
return idaapi.is_msg_inited()
def warning(self, text):
idaapi.warning(text)
#------------------------------------------------------------------------------
# Function Prefix API
#------------------------------------------------------------------------------
PREFIX_SEPARATOR = "%"
#--------------------------------------------------------------------------
# Theme Prediction Helpers (Internal)
#--------------------------------------------------------------------------
def _get_ida_bg_color_ida7(self):
"""
Get the background color of the IDA disassembly view. (IDA 7+)
"""
names = ["Enums", "Structures"]
names += ["Hex View-%u" % i for i in range(5)]
names += ["IDA View-%c" % chr(ord('A') + i) for i in range(5)]
# find a form (eg, IDA view) to analyze colors from
for window_name in names:
twidget = idaapi.find_widget(window_name)
if twidget:
break
else:
raise RuntimeError("Failed to find donor view")
# touch the target form so we know it is populated
self._touch_ida_window(twidget)
# locate the Qt Widget for a form and take 1px image slice of it
import sip
widget = sip.wrapinstance(long(twidget), QtWidgets.QWidget)
pixmap = widget.grab(QtCore.QRect(0, 10, widget.width(), 1))
# convert the raw pixmap into an image (easier to interface with)
image = QtGui.QImage(pixmap.toImage())
# return the predicted background color
return QtGui.QColor(predict_bg_color(image))
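    # NOTE: predict_bg_color() is not defined in this excerpt; it presumably comes
    # from the plugin's Qt helpers (`from ..qt import *`). As a rough sketch of the
    # idea only (not the actual implementation), a background color can be
    # estimated from the 1px slice by taking the most frequent pixel value:
    #
    #   from collections import Counter
    #   def most_common_pixel(image):
    #       pixels = [image.pixel(x, 0) for x in range(image.width())]
    #       return Counter(pixels).most_common(1)[0][0]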
def _get_ida_bg_color_ida6(self):
"""
Get the background color of the IDA disassembly view. (IDA 6.x)
"""
names = ["Enums", "Structures"]
names += ["Hex View-%u" % i for i in range(5)]
names += ["IDA View-%c" % chr(ord('A') + i) for i in range(5)]
# find a form (eg, IDA view) to analyze colors from
for window_name in names:
form = idaapi.find_tform(window_name)
if form:
break
else:
raise RuntimeError("Failed to find donor View")
# touch the target form so we know it is populated
self._touch_ida_window(form)
# locate the Qt Widget for a form and take 1px image slice of it
if USING_PYQT5:
widget = idaapi.PluginForm.FormToPyQtWidget(form, sys.modules[__name__])
pixmap = widget.grab(QtCore.QRect(0, 10, widget.width(), 1))
else:
widget = idaapi.PluginForm.FormToPySideWidget(form, sys.modules[__name__])
region = QtCore.QRect(0, 10, widget.width(), 1)
pixmap = QtGui.QPixmap.grabWidget(widget, region)
# convert the raw pixmap into an image (easier to interface with)
image = QtGui.QImage(pixmap.toImage())
# return the predicted background color
return QtGui.QColor(predict_bg_color(image))
def _touch_ida_window(self, target):
"""
Touch a window/widget/form to ensure it gets drawn by IDA.
XXX/HACK:
We need to ensure that widget we will analyze actually gets drawn
so that there are colors for us to steal.
To do this, we switch to it, and switch back. I tried a few different
ways to trigger this from Qt, but could only trigger the full
painting by going through the IDA routines.
"""
# get the currently active widget/form title (the form itself seems transient...)
if self.USING_IDA7API:
twidget = idaapi.get_current_widget()
title = idaapi.get_widget_title(twidget)
else:
form = idaapi.get_current_tform()
title = idaapi.get_tform_title(form)
# touch/draw the widget by playing musical chairs
if self.USING_IDA7API:
# touch the target window by switching to it
idaapi.activate_widget(target, True)
flush_qt_events()
# locate our previous selection
previous_twidget = idaapi.find_widget(title)
# return us to our previous selection
idaapi.activate_widget(previous_twidget, True)
flush_qt_events()
else:
# touch the target window by switching to it
idaapi.switchto_tform(target, True)
flush_qt_events()
# locate our previous selection
previous_form = idaapi.find_tform(title)
# lookup our original form and switch back to it
idaapi.switchto_tform(previous_form, True)
flush_qt_events()
#------------------------------------------------------------------------------
# Dockable Window
#------------------------------------------------------------------------------
class DockableWindow(DockableShim):
"""
A Dockable Qt widget, compatible with IDA 6.8 --> 7.x.
"""
def __init__(self, window_title, icon_path):
super(DockableWindow, self).__init__(window_title, icon_path)
# IDA 7+ Widgets
if IDAAPI.USING_IDA7API:
import sip
self._form = idaapi.create_empty_widget(self._window_title)
self._widget = sip.wrapinstance(long(self._form), QtWidgets.QWidget)
# legacy IDA PluginForm's
else:
self._form = idaapi.create_tform(self._window_title, None)
if USING_PYQT5:
self._widget = idaapi.PluginForm.FormToPyQtWidget(self._form, sys.modules[__name__])
else:
self._widget = idaapi.PluginForm.FormToPySideWidget(self._form, sys.modules[__name__])
# set the window icon
self._widget.setWindowIcon(self._window_icon)
def show(self):
"""
Show the dockable widget.
"""
# IDA 7+ Widgets
if IDAAPI.USING_IDA7API:
flags = idaapi.PluginForm.WOPN_TAB | \
idaapi.PluginForm.WOPN_MENU | \
idaapi.PluginForm.WOPN_RESTORE | \
idaapi.PluginForm.WOPN_PERSIST
idaapi.display_widget(self._form, flags)
# legacy IDA PluginForm's
else:
flags = idaapi.PluginForm.FORM_TAB | \
idaapi.PluginForm.FORM_MENU | \
idaapi.PluginForm.FORM_RESTORE | \
idaapi.PluginForm.FORM_PERSIST | \
0x80 #idaapi.PluginForm.FORM_QWIDGET
idaapi.open_tform(self._form, flags)
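# Illustrative usage of DockableWindow (names are hypothetical, not from the
# original source):
#
#   dock = DockableWindow("Coverage Overview", icon_path)
#   # ... populate dock._widget with the plugin's Qt content ...
#   dock.show()
#
# The same call sequence works on IDA 6.8 (PluginForm path) and IDA 7+ (TWidget
# path), which is the point of this shim.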
#------------------------------------------------------------------------------
# HexRays Util
#------------------------------------------------------------------------------
def map_line2citem(decompilation_text):
"""
Map decompilation line numbers to citems.
This function allows us to build a relationship between citems in the
ctree and specific lines in the hexrays decompilation text.
Output:
+- line2citem:
| a map keyed with line numbers, holding sets of citem indexes
|
| eg: { int(line_number): sets(citem_indexes), ... }
'
"""
line2citem = {}
#
# it turns out that citem indexes are actually stored inline with the
# decompilation text output, hidden behind COLOR_ADDR tokens.
#
# here we pass each line of raw decompilation text to our crappy lexer,
# extracting any COLOR_ADDR tokens as citem indexes
#
for line_number in xrange(decompilation_text.size()):
line_text = decompilation_text[line_number].line
line2citem[line_number] = lex_citem_indexes(line_text)
#logger.debug("Line Text: %s" % binascii.hexlify(line_text))
return line2citem
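# lex_citem_indexes() is defined elsewhere in this module and not shown in this
# excerpt. Conceptually, it scans a colored line for COLOR_ADDR escape tokens,
# which Hex-Rays uses to embed the originating citem index into the output text.
# A minimal sketch of that idea, assuming the standard idaapi color constants
# (COLOR_ON, COLOR_ADDR, COLOR_ADDR_SIZE) and not the plugin's actual code:
#
#   def lex_citem_indexes_sketch(line):
#       indexes, i = [], 0
#       while i < len(line):
#           if line[i] == idaapi.COLOR_ON and ord(line[i + 1]) == idaapi.COLOR_ADDR:
#               token = line[i + 2:i + 2 + idaapi.COLOR_ADDR_SIZE]
#               indexes.append(int(token, 16))
#               i += 2 + idaapi.COLOR_ADDR_SIZE
#           else:
#               i += 1
#       return indexes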
def map_line2node(cfunc, metadata, line2citem):
"""
Map decompilation line numbers to node (basic blocks) addresses.
This function allows us to build a relationship between graph nodes
(basic blocks) and specific lines in the hexrays decompilation text.
Output:
+- line2node:
| a map keyed with line numbers, holding sets of node addresses
|
| eg: { int(line_number): set(nodes), ... }
'
"""
line2node = {}
treeitems = cfunc.treeitems
function_address = cfunc.entry_ea
#
# prior to this function, a line2citem map was built to tell us which
# citems reside on any given line of text in the decompilation output.
#
# now, we walk through this line2citem map one 'line_number' at a time in
# an effort to resolve the set of graph nodes associated with its citems.
#
for line_number, citem_indexes in line2citem.iteritems():
nodes = set()
#
# we are at the level of a single line (line_number). we now consume
# its set of citems (citem_indexes) and attempt to identify explicit
        # graph nodes they claim to be
= [incidents_widget,
map_widget,
assessments_widget,
activities_widget,
reports_widget,
#comments_widget,
])
# Include a Location inline
location_field = s3db.event_event_location.location_id
# Don't label a single field InlineComponent
location_field.label = ""
represent = S3Represent(lookup="gis_location")
location_field.represent = represent
# L1s only
location_field.requires = IS_NULL_OR(
IS_ONE_OF(db, "gis_location.id",
represent,
sort = True,
filterby = "level",
filter_opts = ["L1"]
)
)
# Don't add new Locations here
location_field.comment = None
# Simple dropdown
location_field.widget = None
from s3.s3forms import S3SQLCustomForm, S3SQLInlineComponent
crud_form = S3SQLCustomForm(
"name",
"event_type_id",
"exercise",
"zero_hour",
"closed",
S3SQLInlineComponent(
"event_location",
label = T("District"),
multiple = False,
fields = ["location_id"],
),
"comments",
)
s3db.configure("event_event",
create_next = URL(c="event", f="event",
args=["[id]", "profile"]),
crud_form = crud_form,
# We want the Create form to be in a modal, not inline, for consistency
listadd = False,
list_layout = render_events,
)
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
return True
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
if r.interactive and \
current.auth.s3_has_permission("create", r.table):
# Insert a Button to Create New in Modal
output["showadd_btn"] = A(I(_class="icon icon-plus-sign big-add"),
_href=URL(c="event", f="event",
args=["create.popup"],
vars={"refresh": "datalist"}),
_class="btn btn-primary s3_modal",
_role="button",
_title=T("Add New Disaster"),
)
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
return output
s3.postp = custom_postp
return attr
settings.ui.customize_event_event = customize_event_event
# -----------------------------------------------------------------------------
def customize_gis_location(**attr):
"""
Customize gis_location controller
- Profile Page
"""
s3 = current.response.s3
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
if r.interactive:
s3db = current.s3db
table = s3db.gis_location
s3.crud_strings["gis_location"].title_list = T("Countries")
if r.method == "datalist":
# District selection page
# 2-column datalist, 6 rows per page
s3.dl_pagelength = 12
s3.dl_rowsize = 2
# Just show specific Countries
s3.filter = (table.name.belongs("Syrian Arab Republic", "Jordan", "Iraq", "Lebanon", "Turkey"))
# Default 5 triggers an AJAX call, we should load all by default
s3.dl_pagelength = 13
list_fields = ["name",
"level",
"L1",
"L2",
"L3",
]
s3db.configure("gis_location",
list_fields = list_fields,
list_layout = render_locations,
)
elif r.method == "profile":
# Customise tables used by widgets
customize_cms_post_fields()
customize_project_project_fields()
# gis_location table (Sub-Locations)
table.parent.represent = s3db.gis_LocationRepresent(sep=" | ")
list_fields = ["name",
"id",
]
# Represent used in rendering
current.auth.settings.table_user.organisation_id.represent = s3db.org_organisation_represent
location = r.record
record_id = location.id
default = "~.(location)=%s" % record_id
map_widget = dict(label = "Map",
type = "map",
context = "location",
icon = "icon-map",
height = 383,
width = 568,
bbox = {"lat_max" : location.lat_max,
"lon_max" : location.lon_max,
"lat_min" : location.lat_min,
"lon_min" : location.lon_min
},
)
#locations_widget = dict(label = "Locations",
# insert = False,
# #title_create = "Add New Location",
# type = "datalist",
# tablename = "gis_location",
# context = "location",
# icon = "icon-globe",
# # @ToDo: Show as Polygons?
# show_on_map = False,
# list_layout = render_locations_profile,
# )
incidents_widget = dict(label = "Incidents",
title_create = "Add New Incident",
type = "datalist",
tablename = "cms_post",
context = "location",
default = default,
filter = (S3FieldSelector("series_id$name") == "Incident") & (S3FieldSelector("expired") == False),
icon = "icon-incident",
layer = "Incidents",
# provided by Catalogue Layer
#marker = "incident",
list_layout = render_profile_posts,
)
projects_widget = dict(label = "Projects",
title_create = "Add New Project",
type = "datalist",
tablename = "project_project",
context = "location",
default = default,
icon = "icon-project",
show_on_map = False, # No Marker yet & only show at L1-level anyway
list_layout = render_projects,
)
reports_widget = dict(label = "Reports",
title_create = "Add New Report",
type = "datalist",
tablename = "cms_post",
context = "location",
default = default,
filter = S3FieldSelector("series_id$name") == "Report",
icon = "icon-report",
layer = "Reports",
# provided by Catalogue Layer
#marker = "report",
list_layout = render_profile_posts,
)
# @ToDo: Renderer
#distributions_widget = dict(label = "Distributions",
# title_create = "Add New Distribution",
# type = "datalist",
# tablename = "supply_distribution",
# context = "location",
# default = default,
# icon = "icon-resource",
# list_layout = render_distributions,
# )
# Build the icon, if it doesn't already exist
filename = "%s.svg" % record_id
import os
filepath = os.path.join(current.request.folder, "static", "cache", "svg", filename)
if not os.path.exists(filepath):
gtable = db.gis_location
loc = db(gtable.id == record_id).select(gtable.wkt,
limitby=(0, 1)
).first()
if loc:
from s3.codecs.svg import S3SVG
S3SVG.write_file(filename, loc.wkt)
name = location.name
s3db.configure("gis_location",
list_fields = list_fields,
profile_title = "%s : %s" % (s3.crud_strings["gis_location"].title_list,
name),
profile_header = DIV(A(IMG(_class="media-object",
_src=URL(c="static",
f="cache",
args=["svg", filename],
),
),
_class="pull-left",
#_href=location_url,
),
H2(name),
_class="profile_header",
),
profile_widgets = [#locations_widget,
incidents_widget,
map_widget,
projects_widget,
reports_widget,
#activities_widget,
#distributions_widget,
],
)
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
return True
s3.prep = custom_prep
return attr
settings.ui.customize_gis_location = customize_gis_location
# -----------------------------------------------------------------------------
def customize_hrm_human_resource_fields():
"""
Customize hrm_human_resource for Profile widgets and 'more' popups
"""
s3db = current.s3db
table = s3db.hrm_human_resource
table.site_id.represent = S3Represent(lookup="org_site")
s3db.org_site.location_id.represent = s3db.gis_LocationRepresent(sep=" | ")
#table.modified_by.represent = s3_auth_user_represent_name
table.modified_on.represent = datetime_represent
list_fields = ["person_id",
"person_id$pe_id",
"organisation_id",
"site_id$location_id",
"site_id$location_id$addr_street",
"job_title_id",
"email.value",
"phone.value",
#"modified_by",
"modified_on",
]
s3db.configure("hrm_human_resource",
list_fields = list_fields,
)
# -----------------------------------------------------------------------------
def customize_hrm_human_resource(**attr):
"""
Customize hrm_human_resource controller
- used for 'more' popups
"""
s3 = current.response.s3
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.method == "datalist":
customize_hrm_human_resource_fields()
current.s3db.configure("hrm_human_resource",
# Don't include a Create form in 'More' popups
listadd = False,
list_layout = render_contacts,
)
return True
s3.prep = custom_prep
return attr
settings.ui.customize_hrm_human_resource = customize_hrm_human_resource
# -----------------------------------------------------------------------------
def customize_hrm_job_title(**attr):
"""
Customize hrm_job_title controller
"""
s3 = current.response.s3
table = current.s3db.hrm_job_title
# Configure fields
field = table.organisation_id
field.readable = field.writable = False
field.default = None
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
if r.interactive:
actions = [dict(label=str(T("Open")),
_class="action-btn",
url=URL(c="hrm", f="job_title",
args=["[id]", "read"]))
]
db = current.db
auth = current.auth
has_permission = auth.s3_has_permission
ownership_required = auth.permission.ownership_required
s3_accessible_query = auth.s3_accessible_query
if has_permission("update", table):
action = dict(label=str(T("Edit")),
_class="action-btn",
url=URL(c="hrm", f="job_title",
args=["[id]", "update"]),
)
if ownership_required("update", table):
# Check which records can be updated
query = s3_accessible_query("update", table)
rows = db(query).select(table._id)
restrict = []
rappend = restrict.append
for row in rows:
row_id = row.get("id", None)
if row_id:
rappend(str(row_id))
action["restrict"] = restrict
actions.append(action)
if has_permission("delete", table):
action = dict(label=str(T("Delete")),
_class="action-btn",
url=URL(c="hrm", f="job_title",
args=["[id]", "delete"]),
)
if ownership_required("delete", table):
# Check which records can be deleted
query = s3_accessible_query("delete", table)
rows = db(query).select(table._id)
restrict = []
rappend = restrict.append
for row in rows:
row_id = row.get("id", None)
if row_id:
rappend(str(row_id))
action["restrict"] = restrict
actions.append(action)
s3.actions = actions
if isinstance(output, dict):
if "form" in output:
output["form"].add_class("hrm_job_title")
elif "item" in output and hasattr(output["item"], "add_class"):
output["item"].add_class("hrm_job_title")
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
return output
s3.postp = custom_postp
return attr
settings.ui.customize_hrm_job_title = customize_hrm_job_title
# -----------------------------------------------------------------------------
def customize_org_office_fields():
"""
Customize org_office for Profile widgets and 'more' popups
"""
s3db = current.s3db
table = s3db.org_office
table.location_id.represent = s3db.gis_LocationRepresent(sep=" | ")
table.modified_by.represent = s3_auth_user_represent_name
table.modified_on.represent = datetime_represent
list_fields = ["name",
"organisation_id",
"office_type_id",
"location_id",
"location_id$addr_street",
"modified_by",
"modified_on",
"organisation_id$logo",
]
s3db.configure("org_office",
list_fields = list_fields,
)
# -----------------------------------------------------------------------------
def customize_org_office(**attr):
"""
Customize org_office controller
"""
s3 = current.response.s3
s3db = current.s3db
table = s3db.org_office
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.method == "datalist":
customize_org_office_fields()
s3db.configure("org_office",
# Don't include a Create form in 'More' popups
listadd = False,
list_layout = render_offices,
)
elif r.interactive or r.representation == "aadata":
# Configure fields
table.code.readable = table.code.writable = False
#table.office_type_id.readable = table.office_type_id.writable = False
table.phone1.readable = table.phone1.writable = False
table.phone2.readable = table.phone2.writable = False
table.email.readable = table.email.writable = False
table.fax.readable = table.fax.writable = False
location_field = table.location_id
# Filter from a Profile page?
# If so, then default the fields we know
get_vars = current.request.get_vars
location_id = get_vars.get("~.(location)", None)
organisation_id = get_vars.get("~.(organisation)", None)
if organisation_id:
org_field = table.organisation_id
org_field.default = organisation_id
org_field.readable = org_field.writable = False
if location_id:
location_field.default = location_id
location_field.readable = location_field.writable = False
else:
# Don't add new Locations here
location_field.comment = None
# L1s only
location_field.requires = IS_LOCATION_SELECTOR2(levels=["L0", "L1"])
location_field.widget = S3LocationSelectorWidget2(levels=["L0", "L1"],
show_address=True,
show_map=False)
s3.cancel = True
return True
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
if r.interactive:
    shared_client = shared_zone_test_context.shared_zone_vinyldns_client
ok_client = shared_zone_test_context.ok_vinyldns_client
shared_zone = shared_zone_test_context.shared_zone
shared_group = shared_zone_test_context.shared_record_group
shared_zone_name = shared_zone_test_context.shared_zone["name"]
shared_delete_name = generate_record_name()
shared_delete_fqdn = f"{shared_delete_name}.{shared_zone_name}"
shared_delete = create_recordset(shared_zone, shared_delete_name, "A", [{"address": "1.1.1.1"}], 200, shared_group["id"])
batch_change_input = {
"changes": [
get_change_A_AAAA_json(shared_delete_fqdn, change_type="DeleteRecordSet")
]
}
create_rs = shared_client.create_recordset(shared_delete, status=202)
shared_client.wait_until_recordset_change_status(create_rs, "Complete")
result = ok_client.create_batch_change(batch_change_input, status=202)
completed_batch = ok_client.wait_until_batch_change_completed(result)
assert_change_success(completed_batch["changes"], zone=shared_zone, index=0,
record_name=shared_delete_name,
input_name=shared_delete_fqdn, record_data=None,
change_type="DeleteRecordSet")
def test_create_batch_delete_recordset_for_unassociated_user_not_in_owner_group_fails(shared_zone_test_context):
"""
Test delete change in batch for a record in a shared zone for an unassociated user not belonging to the record owner group fails
"""
shared_client = shared_zone_test_context.shared_zone_vinyldns_client
unassociated_client = shared_zone_test_context.unassociated_client
shared_zone = shared_zone_test_context.shared_zone
shared_group = shared_zone_test_context.shared_record_group
shared_zone_name = shared_zone_test_context.shared_zone["name"]
shared_group_name = shared_group["name"]
create_rs = None
shared_delete_name = generate_record_name()
shared_delete_fqdn = f"{shared_delete_name}.{shared_zone_name}"
shared_delete = create_recordset(shared_zone, shared_delete_name, "A", [{"address": "1.1.1.1"}], 200, shared_group["id"])
batch_change_input = {
"changes": [
get_change_A_AAAA_json(shared_delete_fqdn, change_type="DeleteRecordSet")
]
}
try:
create_rs = shared_client.create_recordset(shared_delete, status=202)
shared_client.wait_until_recordset_change_status(create_rs, "Complete")
response = unassociated_client.create_batch_change(batch_change_input, status=400)
assert_failed_change_in_error_response(response[0], input_name=shared_delete_fqdn,
change_type="DeleteRecordSet",
error_messages=[f'User "list-group-user" is not authorized. Contact record owner group: '
f'{shared_group_name} at <EMAIL> to make DNS changes.'])
finally:
if create_rs:
delete_rs = shared_client.delete_recordset(shared_zone["id"], create_rs["recordSet"]["id"], status=202)
shared_client.wait_until_recordset_change_status(delete_rs, "Complete")
def test_create_batch_delete_recordset_for_zone_admin_not_in_owner_group_succeeds(shared_zone_test_context):
"""
Test delete change in batch for a record in a shared zone for a zone admin not belonging to the record owner group succeeds
"""
shared_client = shared_zone_test_context.shared_zone_vinyldns_client
ok_client = shared_zone_test_context.ok_vinyldns_client
shared_zone = shared_zone_test_context.shared_zone
ok_group = shared_zone_test_context.ok_group
shared_zone_name = shared_zone_test_context.shared_zone["name"]
shared_delete_name = generate_record_name()
shared_delete_fqdn = f"{shared_delete_name}.{shared_zone_name}"
shared_delete = create_recordset(shared_zone, shared_delete_name, "A", [{"address": "1.1.1.1"}], 200, ok_group["id"])
batch_change_input = {
"changes": [
get_change_A_AAAA_json(shared_delete_fqdn, change_type="DeleteRecordSet")
]
}
create_rs = ok_client.create_recordset(shared_delete, status=202)
shared_client.wait_until_recordset_change_status(create_rs, "Complete")
result = shared_client.create_batch_change(batch_change_input, status=202)
completed_batch = shared_client.wait_until_batch_change_completed(result)
assert_change_success(completed_batch["changes"], zone=shared_zone, index=0,
record_name=shared_delete_name,
input_name=shared_delete_fqdn, record_data=None,
change_type="DeleteRecordSet")
def test_create_batch_update_record_in_shared_zone_for_unassociated_user_in_owner_group_succeeds(
shared_zone_test_context):
"""
Test update change in batch for a record for a user belonging to record owner group succeeds
"""
shared_client = shared_zone_test_context.shared_zone_vinyldns_client
ok_client = shared_zone_test_context.ok_vinyldns_client
shared_zone = shared_zone_test_context.shared_zone
shared_record_group = shared_zone_test_context.shared_record_group
shared_zone_name = shared_zone_test_context.shared_zone["name"]
create_rs = None
shared_update_name = generate_record_name()
shared_update_fqdn = f"{shared_update_name}.{shared_zone_name}"
shared_update = create_recordset(shared_zone, shared_update_name, "MX", [{"preference": 1, "exchange": "foo.bar."}], 200,
shared_record_group["id"])
batch_change_input = {
"changes": [
get_change_MX_json(shared_update_fqdn, ttl=300),
get_change_MX_json(shared_update_fqdn, change_type="DeleteRecordSet")
]
}
try:
create_rs = shared_client.create_recordset(shared_update, status=202)
shared_client.wait_until_recordset_change_status(create_rs, "Complete")
result = ok_client.create_batch_change(batch_change_input, status=202)
completed_batch = ok_client.wait_until_batch_change_completed(result)
assert_change_success(completed_batch["changes"], zone=shared_zone, index=0, record_name=shared_update_name,
ttl=300,
record_type="MX", input_name=shared_update_fqdn,
record_data={"preference": 1, "exchange": "foo.bar."})
assert_change_success(completed_batch["changes"], zone=shared_zone, index=1, record_name=shared_update_name,
record_type="MX", input_name=shared_update_fqdn, record_data=None,
change_type="DeleteRecordSet")
finally:
if create_rs:
delete_rs = shared_client.delete_recordset(shared_zone["id"], create_rs["recordSet"]["id"], status=202)
shared_client.wait_until_recordset_change_status(delete_rs, "Complete")
def test_create_batch_with_global_acl_rule_applied_succeeds(shared_zone_test_context):
"""
Test that a user with a relevant global acl rule can update forward and reverse records, regardless of their current ownership
"""
shared_client = shared_zone_test_context.shared_zone_vinyldns_client
dummy_client = shared_zone_test_context.dummy_vinyldns_client
shared_zone = shared_zone_test_context.shared_zone
ok_client = shared_zone_test_context.ok_vinyldns_client
classless_base_zone = shared_zone_test_context.classless_base_zone
create_a_rs = None
create_ptr_rs = None
dummy_group_id = shared_zone_test_context.dummy_group["id"]
dummy_group_name = shared_zone_test_context.dummy_group["name"]
ip4_prefix = shared_zone_test_context.ip4_classless_prefix
shared_zone_name = shared_zone_test_context.shared_zone["name"]
a_name = generate_record_name()
a_fqdn = f"{a_name}.{shared_zone_name}"
a_record = create_recordset(shared_zone, a_name, "A", [{"address": "1.1.1.1"}], 200, "shared-zone-group")
ptr_record = create_recordset(classless_base_zone, "44", "PTR", [{"ptrdname": "foo."}], 200, None)
batch_change_input = {
"ownerGroupId": dummy_group_id,
"changes": [
get_change_A_AAAA_json(a_fqdn, record_type="A", ttl=200, address=f"{ip4_prefix}.44"),
get_change_PTR_json(f"{ip4_prefix}.44", ptrdname=a_fqdn),
get_change_A_AAAA_json(a_fqdn, record_type="A", address="1.1.1.1", change_type="DeleteRecordSet"),
get_change_PTR_json(f"{ip4_prefix}.44", change_type="DeleteRecordSet")
]
}
try:
create_a_rs = shared_client.create_recordset(a_record, status=202)
shared_client.wait_until_recordset_change_status(create_a_rs, "Complete")
create_ptr_rs = ok_client.create_recordset(ptr_record, status=202)
ok_client.wait_until_recordset_change_status(create_ptr_rs, "Complete")
result = dummy_client.create_batch_change(batch_change_input, status=202)
completed_batch = dummy_client.wait_until_batch_change_completed(result)
assert_change_success(completed_batch["changes"], zone=shared_zone, index=0,
record_name=a_name, ttl=200,
record_type="A", input_name=a_fqdn, record_data=f"{ip4_prefix}.44")
assert_change_success(completed_batch["changes"], zone=classless_base_zone, index=1,
record_name="44",
record_type="PTR", input_name=f"{ip4_prefix}.44",
record_data=a_fqdn)
assert_change_success(completed_batch["changes"], zone=shared_zone, index=2,
record_name=a_name, ttl=200,
record_type="A", input_name=a_fqdn, record_data=None,
change_type="DeleteRecordSet")
assert_change_success(completed_batch["changes"], zone=classless_base_zone, index=3,
record_name="44",
record_type="PTR", input_name=f"{ip4_prefix}.44", record_data=None,
change_type="DeleteRecordSet")
finally:
if create_a_rs:
retrieved = shared_client.get_recordset(shared_zone["id"], create_a_rs["recordSet"]["id"])
retrieved_rs = retrieved["recordSet"]
assert_that(retrieved_rs["ownerGroupId"], is_("shared-zone-group"))
assert_that(retrieved_rs["ownerGroupName"], is_("testSharedZoneGroup"))
delete_a_rs = shared_client.delete_recordset(shared_zone["id"], create_a_rs["recordSet"]["id"], status=202)
shared_client.wait_until_recordset_change_status(delete_a_rs, "Complete")
if create_ptr_rs:
retrieved = dummy_client.get_recordset(shared_zone["id"], create_ptr_rs["recordSet"]["id"])
retrieved_rs = retrieved["recordSet"]
assert_that(retrieved_rs, is_not(has_key("ownerGroupId")))
            assert_that(retrieved_rs, is_not(has_entry("ownerGroupName", dummy_group_name)))
delete_ptr_rs = ok_client.delete_recordset(classless_base_zone["id"], create_ptr_rs["recordSet"]["id"],
status=202)
ok_client.wait_until_recordset_change_status(delete_ptr_rs, "Complete")
def test_create_batch_with_irrelevant_global_acl_rule_applied_fails(shared_zone_test_context):
"""
    Test that a user with an irrelevant global acl rule cannot update owned records
"""
test_user_client = shared_zone_test_context.test_user_client
shared_client = shared_zone_test_context.shared_zone_vinyldns_client
shared_zone = shared_zone_test_context.shared_zone
ip4_prefix = shared_zone_test_context.ip4_classless_prefix
shared_zone_name = shared_zone_test_context.shared_zone["name"]
create_a_rs = None
a_name = generate_record_name()
a_fqdn = f"{a_name}.{shared_zone_name}"
a_record = create_recordset(shared_zone, a_name, "A", [{"address": "1.1.1.1"}], 200, "shared-zone-group")
batch_change_input = {
"changes": [
get_change_A_AAAA_json(a_fqdn, record_type="A", address=f"{ip4_prefix}.45"),
get_change_A_AAAA_json(a_fqdn, record_type="A", change_type="DeleteRecordSet"),
]
}
try:
create_a_rs = shared_client.create_recordset(a_record, status=202)
shared_client.wait_until_recordset_change_status(create_a_rs, "Complete")
response = test_user_client.create_batch_change(batch_change_input, status=400)
assert_failed_change_in_error_response(response[0], input_name=a_fqdn, record_type="A",
change_type="Add", record_data=f"{ip4_prefix}.45",
error_messages=['User "testuser" is not authorized. Contact record owner group: testSharedZoneGroup at email to make DNS changes.'])
finally:
if create_a_rs:
delete_a_rs = shared_client.delete_recordset(shared_zone["id"], create_a_rs["recordSet"]["id"], status=202)
shared_client.wait_until_recordset_change_status(delete_a_rs, "Complete")
@pytest.mark.manual_batch_review
def test_create_batch_with_zone_name_requiring_manual_review(shared_zone_test_context):
"""
Confirm that individual changes matching zone names requiring review get correctly flagged for manual review
"""
rejecter = shared_zone_test_context.support_user_client
client = shared_zone_test_context.ok_vinyldns_client
review_zone_name = shared_zone_test_context.requires_review_zone["name"]
batch_change_input = {
"changes": [
get_change_A_AAAA_json(f"add-test-batch.{review_zone_name}"),
get_change_A_AAAA_json(f"update-test-batch.{review_zone_name}", change_type="DeleteRecordSet"),
get_change_A_AAAA_json(f"update-test-batch.{review_zone_name}"),
get_change_A_AAAA_json(f"delete-test-batch.{review_zone_name}", change_type="DeleteRecordSet")
],
"ownerGroupId": shared_zone_test_context.ok_group["id"]
}
response = None
try:
response = client.create_batch_change(batch_change_input, status=202)
get_batch = client.get_batch_change(response["id"])
assert_that(get_batch["status"], is_("PendingReview"))
assert_that(get_batch["approvalStatus"], is_("PendingReview"))
for i in range(0, 3):
assert_that(get_batch["changes"][i]["status"], is_("NeedsReview"))
assert_that(get_batch["changes"][i]["validationErrors"][0]["errorType"], is_("RecordRequiresManualReview"))
finally:
# Clean up so data doesn't change
if response:
rejecter.reject_batch_change(response["id"], status=200)
def test_create_batch_delete_record_for_invalid_record_data_fails(shared_zone_test_context):
"""
Test delete record set fails for non-existent record and non-existent record data
"""
client = shared_zone_test_context.ok_vinyldns_client
ok_zone_name = shared_zone_test_context.ok_zone["name"]
a_delete_name = generate_record_name()
a_delete_fqdn = a_delete_name + f".{ok_zone_name}"
a_delete = create_recordset(shared_zone_test_context.ok_zone, a_delete_fqdn, "A", [{"address": "1.1.1.1"}])
batch_change_input = {
"comments": "test delete record failures",
"changes": [
get_change_A_AAAA_json(f"delete-non-existent-record.{ok_zone_name}", change_type="DeleteRecordSet"),
get_change_A_AAAA_json(a_delete_fqdn, address="4.5.6.7", change_type="DeleteRecordSet")
]
}
to_delete = []
try:
create_rs = client.create_recordset(a_delete, status=202)
to_delete.append(client.wait_until_recordset_change_status(create_rs, "Complete"))
errors = client.create_batch_change(batch_change_input, status=400)
assert_failed_change_in_error_response(errors[0], input_name=f"delete-non-existent-record.{ok_zone_name}", record_data="1.1.1.1", change_type="DeleteRecordSet",
error_messages=[f'Record "delete-non-existent-record.{ok_zone_name}" Does Not Exist: cannot delete a record that does not exist.'])
assert_failed_change_in_error_response(errors[1], input_name=a_delete_fqdn, record_data="4.5.6.7", change_type="DeleteRecordSet",
error_messages=["Record data 4.5.6.7 does not exist for \"" + a_delete_fqdn + "\"."])
finally:
clear_recordset_list(to_delete, client)
@pytest.mark.serial
def test_create_batch_delete_record_access_checks(shared_zone_test_context):
"""
Test access for full-delete DeleteRecord (delete) and non-full-delete DeleteRecord (update)
"""
ok_client = shared_zone_test_context.ok_vinyldns_client
ok_zone = shared_zone_test_context.ok_zone
dummy_client = shared_zone_test_context.dummy_vinyldns_client
dummy_group_id = shared_zone_test_context.dummy_group["id"]
ok_zone_name = shared_zone_test_context.ok_zone["name"]
ok_group_name = shared_zone_test_context.ok_group["name"]
a_delete_acl = generate_acl_rule("Delete", groupId=dummy_group_id, recordMask=".*", recordTypes=["A"])
txt_write_acl = generate_acl_rule("Write", groupId=dummy_group_id, recordMask=".*", recordTypes=["TXT"])
a_update_name = generate_record_name()
a_update_fqdn = a_update_name + f".{ok_zone_name}"
a_update = create_recordset(ok_zone, a_update_name, "A", [{"address": "1.1.1.1"}])
a_delete_name = generate_record_name()
a_delete_fqdn = a_delete_name + f".{ok_zone_name}"
a_delete = create_recordset(ok_zone, a_delete_name, "A", [{"address": "1.1.1.1"}])
txt_update_name = generate_record_name()
txt_update_fqdn = txt_update_name + f".{ok_zone_name}"
txt_update = create_recordset(ok_zone, txt_update_name, "TXT", [{"text": "test"}])
txt_delete_name = generate_record_name()
txt_delete_fqdn = txt_delete_name + f".{ok_zone_name}"
txt_delete = create_recordset(ok_zone, txt_delete_name, "TXT", [{"text": "test"}])
batch_change_input = {
"comments": "Testing DeleteRecord access levels",
"changes": [
get_change_A_AAAA_json(a_update_fqdn, change_type="DeleteRecordSet"),
get_change_A_AAAA_json(a_update_fqdn, address="4.5.6.7"),
get_change_A_AAAA_json(a_delete_fqdn, change_type="DeleteRecordSet"),
get_change_TXT_json(txt_update_fqdn, change_type="DeleteRecordSet"),
get_change_TXT_json(txt_update_fqdn, text="updated text"),
get_change_TXT_json(txt_delete_fqdn, change_type="DeleteRecordSet")
]
}
to_delete = []
try:
add_ok_acl_rules(shared_zone_test_context, [a_delete_acl, txt_write_acl])
for create_json in [a_update, a_delete, txt_update, txt_delete]:
create_result = ok_client.create_recordset(create_json, status=202)
to_delete.append(ok_client.wait_until_recordset_change_status(create_result, "Complete"))
response = dummy_client.create_batch_change(batch_change_input, status=400)
assert_successful_change_in_error_response(response[0], input_name=a_update_fqdn, record_data="1.1.1.1", change_type="DeleteRecordSet")
        assert_successful_change_in_error_response(response[1], input_name=a_update_fqdn, record_data="4.5.6.7")
assert_successful_change_in_error_response(response[2], input_name=a_delete_fqdn, record_data="1.1.1.1", change_type="DeleteRecordSet")
assert_successful_change_in_error_response(response[3], input_name=txt_update_fqdn, record_type="TXT", record_data="test", change_type="DeleteRecordSet")
assert_successful_change_in_error_response(response[4], input_name=txt_update_fqdn, record_type="TXT", record_data="updated text")
assert_failed_change_in_error_response(response[5], input_name=txt_delete_fqdn, record_type="TXT", record_data="test", change_type="DeleteRecordSet",
error_messages=[f'User "dummy" is not authorized. Contact zone owner group: {ok_group_name} at <EMAIL> to make DNS changes.'])
finally:
clear_ok_acl_rules(shared_zone_test_context)
clear_recordset_list(to_delete, ok_client)
@pytest.mark.skip_production
def test_create_batch_multi_record_update_succeeds(shared_zone_test_context):
"""
Test record sets with multiple records can be added, updated and deleted in batch (relies on skip-prod)
"""
client = shared_zone_test_context.ok_vinyldns_client
ok_zone = shared_zone_test_context.ok_zone
ok_zone_name = shared_zone_test_context.ok_zone["name"]
# record sets to setup
a_update_record_set_name = generate_record_name()
a_update_record_set_fqdn = a_update_record_set_name + f".{ok_zone_name}"
a_update_record_set = create_recordset(ok_zone, a_update_record_set_name, "A", [{"address": "1.1.1.1"}, {"address": "172.16.17.32"}], 200)
txt_update_record_set_name = generate_record_name()
txt_update_record_set_fqdn = txt_update_record_set_name + f".{ok_zone_name}"
txt_update_record_set = create_recordset(ok_zone, txt_update_record_set_name, "TXT", [{"text": "hello"}, {"text": "again"}], 200)
a_update_record_full_name = generate_record_name()
a_update_record_full_fqdn = a_update_record_full_name + f".{ok_zone_name}"
a_update_record_full = create_recordset(ok_zone, a_update_record_full_name, "A", [{"address": "1.1.1.1"}, {"address": "1.1.1.2"}], 200)
txt_update_record_full_name = generate_record_name()
txt_update_record_full_fqdn = txt_update_record_full_name + f".{ok_zone_name}"
txt_update_record_full = create_recordset(ok_zone, txt_update_record_full_name, "TXT", [{"text": "hello"}, {"text": "again"}], 200)
a_update_record_name = generate_record_name()
a_update_record_fqdn = a_update_record_name + f".{ok_zone_name}"
a_update_record = create_recordset(ok_zone, a_update_record_name, "A", [{"address": "1.1.1.1"}, {"address": "1.1.1.2"}], 200)
txt_update_record_name = generate_record_name()
txt_update_record_fqdn = txt_update_record_name + f".{ok_zone_name}"
txt_update_record = create_recordset(ok_zone, txt_update_record_name, "TXT", [{"text": "hello"}, {"text": "again"}], 200)
a_update_record_only_name = generate_record_name()
a_update_record_only_fqdn = a_update_record_only_name + f".{ok_zone_name}"
a_update_record_only = create_recordset(ok_zone, a_update_record_only_name, "A", [{"address": "1.1.1.1"}, {"address": "1.1.1.2"}], 200)
txt_update_record_only_name = generate_record_name()
txt_update_record_only_fqdn = txt_update_record_only_name + f".{ok_zone_name}"
txt_update_record_only = create_recordset(ok_zone, txt_update_record_only_name, "TXT", [{"text": "hello"}, {"text": "again"}], 200)
a_delete_record_set_name = generate_record_name()
a_delete_record_set_fqdn = a_delete_record_set_name + f".{ok_zone_name}"
a_delete_record_set = create_recordset(ok_zone, a_delete_record_set_name, "A", [{"address": "1.1.1.1"}, {"address": "1.1.1.2"}], 200)
txt_delete_record_set_name = generate_record_name()
txt_delete_record_set_fqdn = txt_delete_record_set_name + f".{ok_zone_name}"
txt_delete_record_set = create_recordset(ok_zone, txt_delete_record_set_name, "TXT", [{"text": "hello"}, {"text": "again"}], 200)
a_delete_record_name = generate_record_name()
a_delete_record_fqdn = a_delete_record_name + f".{ok_zone_name}"
a_delete_record = create_recordset(ok_zone, a_delete_record_name, "A", [{"address": "1.1.1.1"}, {"address": "1.1.1.2"}], 200)
    txt_delete_record_name = generate_record_name()
assigned managed identity to use when accessing a resource.
:param str user_assigned_identity: The user assigned managed identity's resource identifier to use when accessing a resource.
"""
pulumi.set(__self__, "user_assigned_identity", user_assigned_identity)
@property
@pulumi.getter(name="userAssignedIdentity")
def user_assigned_identity(self) -> str:
"""
The user assigned managed identity's resource identifier to use when accessing a resource.
"""
return pulumi.get(self, "user_assigned_identity")
@pulumi.output_type
class RsaTokenKeyResponse(dict):
"""
Required validation properties for tokens generated with RSA algorithm.
"""
def __init__(__self__, *,
alg: str,
e: str,
kid: str,
n: str,
type: str):
"""
Required validation properties for tokens generated with RSA algorithm.
:param str alg: RSA algorithm to be used: RS256, RS384 or RS512.
:param str e: RSA public key exponent.
:param str kid: JWT token key id. Validation keys are looked up based on the key id present on the JWT token header.
:param str n: RSA public key modulus.
:param str type: The discriminator for derived types.
Expected value is '#Microsoft.VideoAnalyzer.RsaTokenKey'.
"""
pulumi.set(__self__, "alg", alg)
pulumi.set(__self__, "e", e)
pulumi.set(__self__, "kid", kid)
pulumi.set(__self__, "n", n)
pulumi.set(__self__, "type", '#Microsoft.VideoAnalyzer.RsaTokenKey')
@property
@pulumi.getter
def alg(self) -> str:
"""
RSA algorithm to be used: RS256, RS384 or RS512.
"""
return pulumi.get(self, "alg")
@property
@pulumi.getter
def e(self) -> str:
"""
RSA public key exponent.
"""
return pulumi.get(self, "e")
@property
@pulumi.getter
def kid(self) -> str:
"""
JWT token key id. Validation keys are looked up based on the key id present on the JWT token header.
"""
return pulumi.get(self, "kid")
@property
@pulumi.getter
def n(self) -> str:
"""
RSA public key modulus.
"""
return pulumi.get(self, "n")
@property
@pulumi.getter
def type(self) -> str:
"""
The discriminator for derived types.
Expected value is '#Microsoft.VideoAnalyzer.RsaTokenKey'.
"""
return pulumi.get(self, "type")
@pulumi.output_type
class StorageAccountResponse(dict):
"""
The details about the associated storage account.
"""
def __init__(__self__, *,
status: str,
id: Optional[str] = None,
identity: Optional['outputs.ResourceIdentityResponse'] = None):
"""
The details about the associated storage account.
:param str status: The current status of the storage account mapping.
:param str id: The ID of the storage account resource. Video Analyzer relies on tables, queues, and blobs. The primary storage account must be a Standard Storage account (either Microsoft.ClassicStorage or Microsoft.Storage).
:param 'ResourceIdentityResponse' identity: A managed identity that Video Analyzer will use to access the storage account.
"""
pulumi.set(__self__, "status", status)
if id is not None:
pulumi.set(__self__, "id", id)
if identity is not None:
pulumi.set(__self__, "identity", identity)
@property
@pulumi.getter
def status(self) -> str:
"""
The current status of the storage account mapping.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
The ID of the storage account resource. Video Analyzer relies on tables, queues, and blobs. The primary storage account must be a Standard Storage account (either Microsoft.ClassicStorage or Microsoft.Storage).
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.ResourceIdentityResponse']:
"""
A managed identity that Video Analyzer will use to access the storage account.
"""
return pulumi.get(self, "identity")
@pulumi.output_type
class SystemDataResponse(dict):
"""
Metadata pertaining to creation and last modification of the resource.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "createdAt":
suggest = "created_at"
elif key == "createdBy":
suggest = "created_by"
elif key == "createdByType":
suggest = "created_by_type"
elif key == "lastModifiedAt":
suggest = "last_modified_at"
elif key == "lastModifiedBy":
suggest = "last_modified_by"
elif key == "lastModifiedByType":
suggest = "last_modified_by_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SystemDataResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SystemDataResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SystemDataResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
created_at: Optional[str] = None,
created_by: Optional[str] = None,
created_by_type: Optional[str] = None,
last_modified_at: Optional[str] = None,
last_modified_by: Optional[str] = None,
last_modified_by_type: Optional[str] = None):
"""
Metadata pertaining to creation and last modification of the resource.
:param str created_at: The timestamp of resource creation (UTC).
:param str created_by: The identity that created the resource.
:param str created_by_type: The type of identity that created the resource.
:param str last_modified_at: The timestamp of resource last modification (UTC)
:param str last_modified_by: The identity that last modified the resource.
:param str last_modified_by_type: The type of identity that last modified the resource.
"""
if created_at is not None:
pulumi.set(__self__, "created_at", created_at)
if created_by is not None:
pulumi.set(__self__, "created_by", created_by)
if created_by_type is not None:
pulumi.set(__self__, "created_by_type", created_by_type)
if last_modified_at is not None:
pulumi.set(__self__, "last_modified_at", last_modified_at)
if last_modified_by is not None:
pulumi.set(__self__, "last_modified_by", last_modified_by)
if last_modified_by_type is not None:
pulumi.set(__self__, "last_modified_by_type", last_modified_by_type)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> Optional[str]:
"""
The timestamp of resource creation (UTC).
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter(name="createdBy")
def created_by(self) -> Optional[str]:
"""
The identity that created the resource.
"""
return pulumi.get(self, "created_by")
@property
@pulumi.getter(name="createdByType")
def created_by_type(self) -> Optional[str]:
"""
The type of identity that created the resource.
"""
return pulumi.get(self, "created_by_type")
@property
@pulumi.getter(name="lastModifiedAt")
def last_modified_at(self) -> Optional[str]:
"""
The timestamp of resource last modification (UTC)
"""
return pulumi.get(self, "last_modified_at")
@property
@pulumi.getter(name="lastModifiedBy")
def last_modified_by(self) -> Optional[str]:
"""
The identity that last modified the resource.
"""
return pulumi.get(self, "last_modified_by")
@property
@pulumi.getter(name="lastModifiedByType")
def last_modified_by_type(self) -> Optional[str]:
"""
The type of identity that last modified the resource.
"""
return pulumi.get(self, "last_modified_by_type")
@pulumi.output_type
class TokenClaimResponse(dict):
"""
Properties for expected token claims.
"""
def __init__(__self__, *,
name: str,
value: str):
"""
Properties for expected token claims.
:param str name: Name of the claim which must be present on the token.
:param str value: Expected value of the claim to be present on the token.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the claim which must be present on the token.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def value(self) -> str:
"""
Expected value of the claim to be present on the token.
"""
return pulumi.get(self, "value")
@pulumi.output_type
class UserAssignedManagedIdentityResponse(dict):
"""
The details of the user assigned managed identity used by the Video Analyzer resource.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "clientId":
suggest = "client_id"
elif key == "principalId":
suggest = "principal_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in UserAssignedManagedIdentityResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
UserAssignedManagedIdentityResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
UserAssignedManagedIdentityResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
client_id: str,
principal_id: str):
"""
The details of the user assigned managed identity used by the Video Analyzer resource.
:param str client_id: The client ID.
:param str principal_id: The principal ID.
"""
pulumi.set(__self__, "client_id", client_id)
pulumi.set(__self__, "principal_id", principal_id)
@property
@pulumi.getter(name="clientId")
def client_id(self) -> str:
"""
The client ID.
"""
return pulumi.get(self, "client_id")
@property
@pulumi.getter(name="principalId")
def principal_id(self) -> str:
"""
The principal ID.
"""
return pulumi.get(self, "principal_id")
@pulumi.output_type
class VideoAnalyzerIdentityResponse(dict):
"""
The managed identity for the Video Analyzer resource.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "userAssignedIdentities":
suggest = "user_assigned_identities"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in VideoAnalyzerIdentityResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
VideoAnalyzerIdentityResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
VideoAnalyzerIdentityResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
type: str,
user_assigned_identities: Optional[Mapping[str, 'outputs.UserAssignedManagedIdentityResponse']] = None):
"""
The managed identity for the Video Analyzer resource.
:param str type: The identity type.
:param Mapping[str, 'UserAssignedManagedIdentityResponse'] user_assigned_identities: The User Assigned Managed Identities.
"""
pulumi.set(__self__, "type", type)
if user_assigned_identities is not None:
pulumi.set(__self__, "user_assigned_identities", user_assigned_identities)
@property
@pulumi.getter
def type(self) -> str:
"""
The identity type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="userAssignedIdentities")
def user_assigned_identities(self) -> Optional[Mapping[str, 'outputs.UserAssignedManagedIdentityResponse']]:
"""
The User Assigned Managed Identities.
"""
return pulumi.get(self, "user_assigned_identities")
@pulumi.output_type
class VideoFlagsResponse(dict):
"""
Video flags contain information about the available video actions and its dynamic properties based on the current video state.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "canStream":
suggest = "can_stream"
elif key == "hasData":
suggest = "has_data"
elif key == "isRecording":
suggest = "is_recording"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in VideoFlagsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
VideoFlagsResponse.__key_warning(key)
return super().__getitem__(key)
"""
Description :
Generates the train/test/validation data for slot action and slot value prediction model.
Run Command:
python create_slot_data.py -path=<path of input data> -out=<output directory>
"""
#--------------------------------------------
import os
import json
import argparse
import spacy
import pandas as pd
import random
import six, re
import torch
import traceback
from transformers import BertTokenizer
random.seed(1234)
default_path = os.path.join('data', 'mwz2.1')
default_out_path = "data"
default_mwz_ver = "2.1"
slot_detail = {'Type': 'type', 'Price': 'price', 'Parking': 'parking', 'Stay': 'stay', 'Day': 'day',
'People': 'people', 'Post': 'post', 'Addr': 'address', 'Dest': 'destination', 'Arrive': 'arrive',
'Depart': 'departure', 'Internet': 'internet', 'Stars': 'stars', 'Phone': 'phone', 'Area': 'area',
'Leave': 'leave', 'Time': 'time', 'Ticket': 'ticket', 'Ref': 'reference', 'Food': 'food',
'Name': 'name', 'Department': 'department', 'Fee': 'fee', 'Id': 'id', 'Car': 'car'}
domain_slot_dict = {
'hotel': {'Type', 'Area', 'Phone', 'Day', 'Parking', 'Stars', 'Post', 'People', 'Price', 'Stay', 'Addr', 'Name',
'Ref', 'Internet'}, 'police': {'Name', 'Phone', 'Post', 'Addr'},
'train': {'Arrive', 'Day', 'Leave', 'Time', 'People', 'Ticket', 'Id', 'Ref', 'Dest', 'Depart'},
'attraction': {'Type', 'Area', 'Phone', 'Fee', 'Post', 'Addr', 'Name'},
'restaurant': {'Area', 'Phone', 'Day', 'Food', 'Post', 'Time', 'Addr', 'Price', 'People', 'Name', 'Ref'},
'hospital': {'Post', 'Phone', 'Addr', 'Department'}, 'taxi': {'Arrive', 'Phone', 'Leave', 'Car', 'Dest', 'Depart'}}
meta = {'attraction': {'name', 'type', 'area'},
'hotel': {'name', 'type', 'parking', 'area', 'day', 'stay', 'internet', 'people', 'stars', 'price'},
'restaurant': {'name', 'food', 'area', 'day', 'time', 'people', 'price'},
'taxi': {'arrive', 'departure', 'leave', 'destination'},
'train': {'arrive', 'day', 'leave', 'destination', 'departure', 'people'}
}
attraction_type = ['sport', 'entertainment', 'cinema', 'museum', 'theatre', 'church', 'boat', 'architecture', 'college',
'park', 'theater', 'camboats', 'concert', 'park', 'concert', 'hiking', 'historical', 'gallery',
'nightclub', 'special', 'swimming', 'gastropub', 'outdoor', 'pool', 'pub', 'club', 'swim', 'hall',
'movie']
hotel_type = ["hotel", "guesthouse", "guest house", "lodge"]
u_slots = set()
for d in meta:
for u in meta[d]:
u_slots.add(u)
qa_slots = u_slots.difference({'parking', 'internet'})
print("Unique slots : {}".format(u_slots))
print("QA slots : {}".format(qa_slots))
ignore_domain = ['booking']
domain_set = {'police', 'restaurant', 'hotel', 'taxi', 'attraction', 'train', 'hospital'}
spacy_en = spacy.load('en_core_web_sm')
dataset_config = os.path.join('trippy_label_variant', 'multiwoz21.json')
with open(dataset_config, "r", encoding='utf-8') as f:
raw_config = json.load(f)
class_types = raw_config['class_types']
slot_list = raw_config['slots']
label_maps = raw_config['label_maps']
analyze = False
# --------------------------------------------
slot_domain_dict = {}
for dom in domain_slot_dict:
for s in domain_slot_dict[dom]:
        if slot_detail[s] not in slot_domain_dict:
            slot_domain_dict[slot_detail[s]] = set()
slot_domain_dict[slot_detail[s]].add(dom)
print(slot_domain_dict)
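# Note: the loop above inverts domain_slot_dict so that each canonical slot
# name maps to the set of domains that use it; e.g. one would expect roughly
# slot_domain_dict['area'] == {'hotel', 'attraction', 'restaurant'} and
# slot_domain_dict['car'] == {'taxi'}.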
# --------------------------------------------
def loadJson(data_file):
if os.path.isfile(data_file):
with open(data_file, 'r') as read_file:
data = json.load(read_file)
return data
def load_list_file(list_file):
with open(list_file, 'r') as read_file:
dialog_id_list = read_file.readlines()
dialog_id_list = [l.strip('\n') for l in dialog_id_list]
return dialog_id_list
def isUnseen(slot_key, slot_val, bs):
f = True
if (slot_key in bs):
if (slot_val == bs[slot_key]):
f = False
else:
v = bs[slot_key]
if v in label_maps:
for value_label_variant in label_maps[v]:
if slot_val == value_label_variant:
f = False
break
if (f and slot_val in label_maps):
for value_label_variant in label_maps[slot_val]:
if v == value_label_variant:
f = False
break
return f
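# Illustrative behaviour of isUnseen() (assuming label_maps treats 'centre'
# and 'center' as variants of the same value):
#   isUnseen('restaurant-area', 'center', {'restaurant-area': 'centre'}) -> False
#   isUnseen('restaurant-area', 'north',  {'restaurant-area': 'centre'}) -> True
# i.e. a value only counts as unseen when neither it nor any of its label
# variants already appears for that slot in the previous belief state.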
def getBeliefState(belief_state):
bs = {}
for l in range(len(belief_state)):
for sv in belief_state[l]['slots']:
b_key = sv[0]
if ("-book" in b_key):
b_key_l = b_key.split(" ")
b_key = b_key_l[0].split("-")[0] + "-" + correctSlotName(b_key_l[1])
else:
b_key = b_key.split("-")[0] + "-" + correctSlotName(b_key.split("-")[1])
if (sv[1] != 'none'):
bs[b_key] = sv[1]
return cleanBeliefState(bs)
def getTurnPrediction(bs, bs_prev):
bs_turn = {}
for slot_key in bs:
slot_val = bs[slot_key]
if (isUnseen(slot_key, slot_val, bs_prev)):
bs_turn[slot_key] = slot_val
return bs_turn
def getDomainSlots(domain):
s = set()
for slot in domain_slot_dict[domain]:
s.add(slot_detail[slot])
return s
def cleanBeliefState(belief_state):
bs = {}
for k, v in belief_state.items():
if (v != 'none'):
bs[k] = v
return bs
def cleanDialogAct(dialog_act):
dst = {}
for k in dialog_act:
if (dialog_act[k] == "do n't care" or dialog_act[k] == "do nt care"):
dst[k] = "dontcare"
else:
dst[k] = dialog_act[k]
return dst
def correctSlotName(slot):
if (slot == "arriveby"):
return "arrive"
elif (slot == "leaveat"):
return "leave"
elif (slot == "pricerange"):
return "price"
else:
return slot
def tokenize_en(text):
return [tok.text for tok in spacy_en.tokenizer(text) if not tok.is_space]
def getSpanValue(domain, slot, txt, span_dict):
span_val = " "
start_idx = -1
end_idx = -1
span_key = domain + "-" + slot
if (span_key in span_dict):
        if (str(span_dict[span_key][1]).isnumeric() and str(span_dict[span_key][2]).isnumeric()):
tokens = tokenize_en(txt.lower())
start_idx = span_dict[span_key][1]
end_idx = span_dict[span_key][2]
span_val = ' '.join(tokens[start_idx: end_idx + 1])
return span_val
def isValidAnnotation(d_log):
flag = False
domain_set = set()
for i, t in enumerate(d_log):
if ('dialog_act' in t.keys()):
if (len(list(t['dialog_act'])) > 0):
ds = getDomain(list(t['dialog_act']))
domain_set = domain_set.union(ds)
if (len(domain_set) > 0):
flag = True
return flag
def getTurnLabel(tl):
turn_label = {}
for l in range(len(tl)):
sv = tl[l]
b_key = sv[0]
if ("-book" in b_key):
b_key_l = b_key.split(" ")
b_key = b_key_l[0].split("-")[0] + "-" + correctSlotName(b_key_l[1])
else:
b_key = b_key.split("-")[0] + "-" + correctSlotName(b_key.split("-")[1])
turn_label[b_key] = sv[1]
return cleanBeliefState(turn_label)
def getDialogueAct(da):
day = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']
d_act = {}
for k, v in da.items():
dom_actual = k.split("-")[0].lower()
if (len(v) > 0):
for slots in v:
if (len(slots) > 0 and slots[0] != 'none'):
if (dom_actual != 'general'):
if (slot_detail[slots[0]] == "day" and slots[1].lower() in day):
d_act[dom_actual + "-" + slot_detail[slots[0]]] = slots[1].lower()
else:
d_act[dom_actual + "-" + slot_detail[slots[0]]] = slots[1].lower()
return cleanDialogAct(d_act)
def getDomain(dialog_act):
domain_set = set()
for d in dialog_act:
t = d.split("-")[0].lower()
if (t not in ignore_domain):
domain_set.add(t)
return domain_set
# ---------------------------------
# From bert.tokenization (TF code)
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def normalize_time(text):
text = re.sub("(\d{1})(a\.?m\.?|p\.?m\.?)", r"\1 \2", text) # am/pm without space
text = re.sub("(^| )(\d{1,2}) (a\.?m\.?|p\.?m\.?)", r"\1\2:00 \3", text) # am/pm short to long form
text = re.sub("(^| )(at|from|by|until|after) ?(\d{1,2}) ?(\d{2})([^0-9]|$)", r"\1\2 \3:\4\5",
text) # Missing separator
text = re.sub("(^| )(\d{2})[;.,](\d{2})", r"\1\2:\3", text) # Wrong separator
text = re.sub("(^| )(\d{2}):(\d{2})/", r"\1\2:\3", text) # Wrong separator
text = re.sub("(^| )(\d{1}) (\d{2})", r"\1\2:\3", text) # Wrong separator
text = re.sub("(^| )(\d{2}):!(\d{1})", r"\1\2:1\3", text) # Wrong format
text = re.sub("(^| )(at|from|by|until|after) ?(\d{1,2})([;., ]|$)", r"\1\2 \3:00\4",
text) # normalize simple full hour time
text = re.sub("(^| )(\d{1}:\d{2})", r"\g<1>0\2", text) # Add missing leading 0
# Map 12 hour times to 24 hour times
text = re.sub("(\d{2})(:\d{2}) ?p\.?m\.?",
lambda x: str(int(x.groups()[0]) + 12 if int(x.groups()[0]) < 12 else int(x.groups()[0])) +
x.groups()[1], text)
text = re.sub("(^| )24:(\d{2})", r"\g<1>00:\2", text) # Correct times that use 24 as hour
return text
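# Illustrative expected outputs for normalize_time(), reasoned from the
# regexes above (treat as a sketch rather than a test suite):
#   normalize_time("at 830") -> "at 08:30"   # missing separator + leading 0
#   normalize_time("5pm")    -> "17:00"      # am/pm expansion, 12h -> 24h
#   normalize_time("17;45")  -> "17:45"      # wrong separator fixed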
def normalize_text(utt):
text = convert_to_unicode(utt)
text = text.lower()
text = normalize_time(text)
text = re.sub("n't", " not", text)
text = re.sub("(^| )zero(-| )star([s.,? ]|$)", r"\g<1>0 star\3", text)
text = re.sub("(^| )one(-| )star([s.,? ]|$)", r"\g<1>1 star\3", text)
text = re.sub("(^| )two(-| )star([s.,? ]|$)", r"\g<1>2 star\3", text)
text = re.sub("(^| )three(-| )star([s.,? ]|$)", r"\g<1>3 star\3", text)
text = re.sub("(^| )four(-| )star([s.,? ]|$)", r"\g<1>4 star\3", text)
text = re.sub("(^| )five(-| )star([s.,? ]|$)", r"\g<1>5 star\3", text)
text = re.sub("(^| )(\d{1})-star([s.,? ]|$)", r"\1\2 star\3", text)
text = re.sub("archaelogy", "archaeology", text) # Systematic typo
text = re.sub("mutliple", "multiple", text) # Systematic typo
# text = re.sub("guesthouse", "guest house", text) # Normalization
text = re.sub("(^| )b ?& ?b([.,? ]|$)", r"\1bed and breakfast\2", text) # Normalization
text = re.sub("bed & breakfast", "bed and breakfast", text) # Normalization
return text
def getValidAnnotations(path, ver, mode, data):
dials_path = os.path.join(path, "mwz" + ver, mode + "_dials.json")
dials = loadJson(dials_path)
dials_data = {}
for i, d in enumerate(dials):
dials_data[d['dialogue_idx']] = d
final_data = []
c = 0
for k, d in data:
if (isValidAnnotation(d['log'])):
c += 1
print("Actual data : {}".format(len(data)))
print("Valid data : {}".format(c))
def isReferral(slot, value_label, seen_slots):
ref = "none"
if slot == 'hotel-stars' or slot == 'hotel-internet' or slot == 'hotel-parking':
return ref
for s in seen_slots:
# Avoid matches for slots that share values with different meaning.
# hotel-internet and -parking are handled separately as Boolean slots.
if s == 'hotel-stars' or s == 'hotel-internet' or s == 'hotel-parking':
continue
if re.match("(hotel|restaurant)-people", s) and slot == 'hotel-stay':
continue
if re.match("(hotel|restaurant)-people", slot) and s == 'hotel-stay':
continue
if slot != s and (slot not in seen_slots or seen_slots[slot] != value_label):
if seen_slots[s] == value_label:
ref = s
break
if value_label in label_maps:
for value_label_variant in label_maps[value_label]:
if seen_slots[s] == value_label_variant:
ref = s
break
if seen_slots[s] in label_maps:
for value_label_variant in label_maps[seen_slots[s]]:
if value_label == value_label_variant:
ref = s
break
return ref
def inUtterance(val, utt,
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file implements the functionalities of a minitaur using pybullet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import math
import re
import numpy as np
from locomotion.robots import minitaur_constants
from locomotion.robots import minitaur_motor
from locomotion.robots import robot_config
from locomotion.robots import action_filter
from locomotion.robots import kinematics
INIT_POSITION = [0, 0, .2]
INIT_RACK_POSITION = [0, 0, 1]
INIT_ORIENTATION = [0, 0, 0, 1]
KNEE_CONSTRAINT_POINT_RIGHT = [0, 0.005, 0.2]
KNEE_CONSTRAINT_POINT_LEFT = [0, 0.01, 0.2]
OVERHEAT_SHUTDOWN_TORQUE = 2.45
OVERHEAT_SHUTDOWN_TIME = 1.0
LEG_POSITION = ["front_left", "back_left", "front_right", "back_right"]
MOTOR_NAMES = [
"motor_front_leftL_joint", "motor_front_leftR_joint",
"motor_back_leftL_joint", "motor_back_leftR_joint",
"motor_front_rightL_joint", "motor_front_rightR_joint",
"motor_back_rightL_joint", "motor_back_rightR_joint"
]
_CHASSIS_NAME_PATTERN = re.compile(r"chassis\D*center")
_MOTOR_NAME_PATTERN = re.compile(r"motor\D*joint")
_KNEE_NAME_PATTERN = re.compile(r"knee\D*")
_BRACKET_NAME_PATTERN = re.compile(r"motor\D*_bracket_joint")
_LEG_NAME_PATTERN1 = re.compile(r"hip\D*joint")
_LEG_NAME_PATTERN2 = re.compile(r"hip\D*link")
_LEG_NAME_PATTERN3 = re.compile(r"motor\D*link")
SENSOR_NOISE_STDDEV = (0.0, 0.0, 0.0, 0.0, 0.0)
MINITAUR_DEFAULT_MOTOR_DIRECTIONS = (-1, -1, -1, -1, 1, 1, 1, 1)
MINITAUR_DEFAULT_MOTOR_OFFSETS = (0, 0, 0, 0, 0, 0, 0, 0)
MINITAUR_NUM_MOTORS = 8
TWO_PI = 2 * math.pi
MINITAUR_DOFS_PER_LEG = 2
def MapToMinusPiToPi(angles):
"""Maps a list of angles to [-pi, pi].
Args:
angles: A list of angles in rad.
Returns:
A list of angle mapped to [-pi, pi].
"""
mapped_angles = copy.deepcopy(angles)
for i in range(len(angles)):
mapped_angles[i] = math.fmod(angles[i], TWO_PI)
if mapped_angles[i] >= math.pi:
mapped_angles[i] -= TWO_PI
elif mapped_angles[i] < -math.pi:
mapped_angles[i] += TWO_PI
return mapped_angles
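# Illustrative example: MapToMinusPiToPi([1.5 * math.pi, -1.5 * math.pi])
# should return approximately [-0.5 * pi, 0.5 * pi]; each angle is first
# reduced modulo 2*pi and then shifted by +/- 2*pi into [-pi, pi).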
class Minitaur(object):
"""The minitaur class that simulates a quadruped robot from Ghost Robotics."""
def __init__(self,
pybullet_client,
num_motors=MINITAUR_NUM_MOTORS,
dofs_per_leg=MINITAUR_DOFS_PER_LEG,
time_step=0.01,
action_repeat=1,
self_collision_enabled=False,
motor_control_mode=robot_config.MotorControlMode.POSITION,
motor_model_class=minitaur_motor.MotorModel,
motor_kp=1.0,
motor_kd=0.02,
motor_torque_limits=None,
pd_latency=0.0,
control_latency=0.0,
observation_noise_stdev=SENSOR_NOISE_STDDEV,
motor_overheat_protection=False,
motor_direction=MINITAUR_DEFAULT_MOTOR_DIRECTIONS,
motor_offset=MINITAUR_DEFAULT_MOTOR_OFFSETS,
on_rack=False,
reset_at_current_position=False,
sensors=None,
enable_action_interpolation=False,
enable_action_filter=False,
reset_time=-1):
"""Constructs a minitaur and reset it to the initial states.
Args:
pybullet_client: The instance of BulletClient to manage different
simulations.
num_motors: The number of the motors on the robot.
dofs_per_leg: The number of degrees of freedom for each leg.
time_step: The time step of the simulation.
action_repeat: The number of ApplyAction() for each control step.
self_collision_enabled: Whether to enable self collision.
motor_control_mode: Enum. Can either be POSITION, TORQUE, or HYBRID.
      motor_model_class: We can choose from a simple PD model to more accurate DC
motor models.
motor_kp: proportional gain for the motors.
motor_kd: derivative gain for the motors.
motor_torque_limits: Torque limits for the motors. Can be a single float
or a list of floats specifying different limits for different robots. If
not provided, the default limit of the robot is used.
pd_latency: The latency of the observations (in seconds) used to calculate
PD control. On the real hardware, it is the latency between the
microcontroller and the motor controller.
control_latency: The latency of the observations (in second) used to
calculate action. On the real hardware, it is the latency from the motor
controller, the microcontroller to the host (Nvidia TX2).
observation_noise_stdev: The standard deviation of a Gaussian noise model
for the sensor. It should be an array for separate sensors in the
following order [motor_angle, motor_velocity, motor_torque,
base_roll_pitch_yaw, base_angular_velocity]
motor_overheat_protection: Whether to shutdown the motor that has exerted
large torque (OVERHEAT_SHUTDOWN_TORQUE) for an extended amount of time
(OVERHEAT_SHUTDOWN_TIME). See ApplyAction() in minitaur.py for more
details.
motor_direction: A list of direction values, either 1 or -1, to compensate
the axis difference of motors between the simulation and the real robot.
motor_offset: A list of offset value for the motor angles. This is used to
compensate the angle difference between the simulation and the real
robot.
on_rack: Whether to place the minitaur on rack. This is only used to debug
the walking gait. In this mode, the minitaur's base is hanged midair so
that its walking gait is clearer to visualize.
reset_at_current_position: Whether to reset the minitaur at the current
position and orientation. This is for simulating the reset behavior in
the real world.
sensors: a list of sensors that are attached to the robot.
enable_action_interpolation: Whether to interpolate the current action
with the previous action in order to produce smoother motions
enable_action_filter: Boolean specifying if a lowpass filter should be
used to smooth actions.
"""
self.num_motors = num_motors
self.num_legs = self.num_motors // dofs_per_leg
self._pybullet_client = pybullet_client
self._action_repeat = action_repeat
self._self_collision_enabled = self_collision_enabled
self._motor_direction = motor_direction
self._motor_offset = motor_offset
self._observed_motor_torques = np.zeros(self.num_motors)
self._applied_motor_torques = np.zeros(self.num_motors)
self._max_force = 3.5
self._pd_latency = pd_latency
self._control_latency = control_latency
self._observation_noise_stdev = observation_noise_stdev
self._observation_history = collections.deque(maxlen=100)
self._control_observation = []
self._chassis_link_ids = [-1]
self._leg_link_ids = []
self._motor_link_ids = []
self._foot_link_ids = []
self._motor_overheat_protection = motor_overheat_protection
self._on_rack = on_rack
self._reset_at_current_position = reset_at_current_position
self.SetAllSensors(sensors if sensors is not None else list())
self._is_safe = True
self._enable_action_interpolation = enable_action_interpolation
self._enable_action_filter = enable_action_filter
self._last_action = None
if not motor_model_class:
raise ValueError("Must provide a motor model class!")
if self._on_rack and self._reset_at_current_position:
raise ValueError("on_rack and reset_at_current_position "
"cannot be enabled together")
if isinstance(motor_kp, (collections.Sequence, np.ndarray)):
self._motor_kps = np.asarray(motor_kp)
else:
self._motor_kps = np.full(num_motors, motor_kp)
if isinstance(motor_kd, (collections.Sequence, np.ndarray)):
self._motor_kds = np.asarray(motor_kd)
else:
self._motor_kds = np.full(num_motors, motor_kd)
if isinstance(motor_torque_limits, (collections.Sequence, np.ndarray)):
self._motor_torque_limits = np.asarray(motor_torque_limits)
elif motor_torque_limits is None:
self._motor_torque_limits = None
else:
self._motor_torque_limits = motor_torque_limits
self._motor_control_mode = motor_control_mode
self._motor_model = motor_model_class(
kp=motor_kp,
kd=motor_kd,
torque_limits=self._motor_torque_limits,
motor_control_mode=motor_control_mode)
self.time_step = time_step
self._step_counter = 0
# This also includes the time spent during the Reset motion.
self._state_action_counter = 0
_, self._init_orientation_inv = self._pybullet_client.invertTransform(
position=[0, 0, 0], orientation=self._GetDefaultInitOrientation())
if self._enable_action_filter:
self._action_filter = self._BuildActionFilter()
# reset_time=-1.0 means skipping the reset motion.
# See Reset for more details.
self.Reset(reset_time=reset_time)
self.ReceiveObservation()
def GetTimeSinceReset(self):
return self._step_counter * self.time_step
def _StepInternal(self, action, motor_control_mode=None):
self.ApplyAction(action, motor_control_mode)
self._pybullet_client.stepSimulation()
self.ReceiveObservation()
self._state_action_counter += 1
def Step(self, action, motor_control_mode=None):
"""Steps simulation."""
if self._enable_action_filter:
action = self._FilterAction(action)
for i in range(self._action_repeat):
proc_action = self.ProcessAction(action, i)
self._StepInternal(proc_action, motor_control_mode)
self._step_counter += 1
self._last_action = action
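  # Note: Step() optionally low-pass filters the commanded action, then runs
  # ApplyAction() + stepSimulation() `action_repeat` times per control step,
  # with ProcessAction() interpolating between the previous and current action
  # when action interpolation is enabled.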
def Terminate(self):
pass
def GetFootLinkIDs(self):
"""Get list of IDs for all foot links."""
return self._foot_link_ids
def _RecordMassInfoFromURDF(self):
"""Records the mass information from the URDF file."""
self._base_mass_urdf = []
for chassis_id in self._chassis_link_ids:
self._base_mass_urdf.append(
self._pybullet_client.getDynamicsInfo(self.quadruped, chassis_id)[0])
self._leg_masses_urdf = []
for leg_id in self._leg_link_ids:
self._leg_masses_urdf.append(
self._pybullet_client.getDynamicsInfo(self.quadruped, leg_id)[0])
for motor_id in self._motor_link_ids:
self._leg_masses_urdf.append(
self._pybullet_client.getDynamicsInfo(self.quadruped, motor_id)[0])
def _RecordInertiaInfoFromURDF(self):
"""Record the inertia of each body from URDF file."""
self._link_urdf = []
num_bodies = self._pybullet_client.getNumJoints(self.quadruped)
for body_id in range(-1, num_bodies): # -1 is for the base link.
inertia = self._pybullet_client.getDynamicsInfo(self.quadruped,
body_id)[2]
self._link_urdf.append(inertia)
# We need to use id+1 to index self._link_urdf because it has the base
# (index = -1) at the first element.
self._base_inertia_urdf = [
self._link_urdf[chassis_id + 1]
for chassis_id in self._chassis_link_ids
]
self._leg_inertia_urdf = [
self._link_urdf[leg_id + 1] for leg_id in self._leg_link_ids
]
self._leg_inertia_urdf.extend(
[self._link_urdf[motor_id + 1] for motor_id in self._motor_link_ids])
def _BuildJointNameToIdDict(self):
num_joints = self._pybullet_client.getNumJoints(self.quadruped)
self._joint_name_to_id = {}
for i in range(num_joints):
joint_info = self._pybullet_client.getJointInfo(self.quadruped, i)
self._joint_name_to_id[joint_info[1].decode("UTF-8")] = joint_info[0]
def _BuildUrdfIds(self):
"""Build the link Ids from its name in the URDF file.
Raises:
ValueError: Unknown category of the joint name.
"""
num_joints = self._pybullet_client.getNumJoints(self.quadruped)
self._chassis_link_ids = [-1]
# The self._leg_link_ids include both the upper and lower links of the leg.
self._leg_link_ids = []
self._motor_link_ids = []
self._foot_link_ids = []
self._bracket_link_ids = []
for i in range(num_joints):
joint_info = self._pybullet_client.getJointInfo(self.quadruped, i)
joint_name = joint_info[1].decode("UTF-8")
joint_id = self._joint_name_to_id[joint_name]
if _CHASSIS_NAME_PATTERN.match(joint_name):
self._chassis_link_ids.append(joint_id)
elif _BRACKET_NAME_PATTERN.match(joint_name):
self._bracket_link_ids.append(joint_id)
elif _MOTOR_NAME_PATTERN.match(joint_name):
self._motor_link_ids.append(joint_id)
elif _KNEE_NAME_PATTERN.match(joint_name):
self._foot_link_ids.append(joint_id)
elif (_LEG_NAME_PATTERN1.match(joint_name)
or _LEG_NAME_PATTERN2.match(joint_name)
or _LEG_NAME_PATTERN3.match(joint_name)):
self._leg_link_ids.append(joint_id)
else:
raise ValueError("Unknown category of joint %s" % joint_name)
self._leg_link_ids.extend(self._foot_link_ids)
self._chassis_link_ids.sort()
self._motor_link_ids.sort()
self._foot_link_ids.sort()
self._leg_link_ids.sort()
self._bracket_link_ids.sort()
def _RemoveDefaultJointDamping(self):
num_joints = self._pybullet_client.getNumJoints(self.quadruped)
for i in range(num_joints):
joint_info = self._pybullet_client.getJointInfo(self.quadruped, i)
self._pybullet_client.changeDynamics(joint_info[0],
-1,
linearDamping=0,
angularDamping=0)
def _BuildMotorIdList(self):
self._motor_id_list = [
self._joint_name_to_id[motor_name]
for motor_name in self._GetMotorNames()
]
def _CreateRackConstraint(self, init_position, init_orientation):
"""Create a constraint that keeps the chassis at a fixed frame.
This frame is defined by init_position and init_orientation.
Args:
init_position: initial position of the fixed frame.
init_orientation: initial orientation of the fixed frame in quaternion
format [x,y,z,w].
Returns:
      Return
0 , 120 , (3, 0, None, None) , 0 , )),
(( 'Copy' , 'ImageFrom' , ), 20, (20, (), [ (9, 0, None, "IID('{F2BCF189-0B27-11D4-B5F5-9CC767000000}')") , ], 1 , 1 , 4 , 0 , 124 , (3, 0, None, None) , 0 , )),
(( 'IsModified' , 'pVal' , ), 21, (21, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 128 , (3, 0, None, None) , 0 , )),
(( 'FileName' , 'pVal' , ), 22, (22, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 132 , (3, 0, None, None) , 0 , )),
(( 'PageNumber' , 'pVal' , ), 23, (23, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 136 , (3, 0, None, None) , 0 , )),
(( 'PageCount' , 'pVal' , ), 24, (24, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 140 , (3, 0, None, None) , 0 , )),
(( 'Save' , ), 25, (25, (), [ ], 1 , 1 , 4 , 0 , 144 , (3, 0, None, None) , 0 , )),
(( 'CopyToClipboard' , ), 29, (29, (), [ ], 1 , 1 , 4 , 0 , 148 , (3, 0, None, None) , 0 , )),
(( 'SaveToBitmap' , 'hBitmap' , ), 30, (30, (), [ (16387, 10, None, None) , ], 1 , 1 , 4 , 0 , 152 , (3, 0, None, None) , 0 , )),
(( 'SaveToDIB' , 'hDib' , ), 31, (31, (), [ (16387, 10, None, None) , ], 1 , 1 , 4 , 0 , 156 , (3, 0, None, None) , 0 , )),
(( 'SaveToMemory' , 'pData' , ), 32, (32, (), [ (16396, 10, None, None) , ], 1 , 1 , 4 , 0 , 160 , (3, 0, None, None) , 0 , )),
(( 'OpenFromClipboard' , ), 26, (26, (), [ ], 1 , 1 , 4 , 0 , 164 , (3, 0, None, None) , 0 , )),
(( 'OpenFromBitmap' , 'hBitmap' , ), 27, (27, (), [ (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 168 , (3, 0, None, None) , 0 , )),
(( 'LoadFromMemory' , 'pData' , ), 28, (28, (), [ (12, 1, None, None) , ], 1 , 1 , 4 , 0 , 172 , (3, 0, None, None) , 0 , )),
(( 'pScaleBmpBrightness' , 'pVal' , ), 37, (37, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 176 , (3, 0, None, None) , 1089 , )),
(( 'pScaleBmpBrightness' , 'pVal' , ), 37, (37, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 180 , (3, 0, None, None) , 1089 , )),
(( 'pScaleBmpContrast' , 'pVal' , ), 38, (38, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 184 , (3, 0, None, None) , 1089 , )),
(( 'pScaleBmpContrast' , 'pVal' , ), 38, (38, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 188 , (3, 0, None, None) , 1089 , )),
(( 'pScaleBmpType' , 'pVal' , ), 36, (36, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 192 , (3, 0, None, None) , 1088 , )),
(( 'pScaleBmpType' , 'pVal' , ), 36, (36, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 196 , (3, 0, None, None) , 1088 , )),
(( 'ScaleToDIB' , 'ScaleX' , 'ScaleY' , 'hBitmap' , ), 40, (40, (), [
(5, 1, None, None) , (5, 1, None, None) , (16387, 10, None, None) , ], 1 , 1 , 4 , 0 , 200 , (3, 0, None, None) , 1088 , )),
(( 'Format' , 'pVal' , ), 41, (41, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 204 , (3, 0, None, None) , 0 , )),
(( 'Handle' , 'pVal' , ), 42, (42, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 208 , (3, 0, None, None) , 1088 , )),
(( 'CreateZoneRect' , 'Rect' , 'ppInt' , ), 43, (43, (), [ (9, 1, None, "IID('{4ED88244-0BE1-11D4-B5F6-009FC6000000}')") ,
(16393, 10, None, "IID('{F2BCF189-0B27-11D4-B5F5-9CC767000000}')") , ], 1 , 1 , 4 , 0 , 212 , (3, 0, None, None) , 0 , )),
(( 'CreateZone' , 'left' , 'top' , 'right' , 'bottom' ,
'ppInt' , ), 45, (45, (), [ (3, 49, '0', None) , (3, 49, '0', None) , (3, 49, '0', None) ,
(3, 49, '0', None) , (16393, 10, None, "IID('{F2BCF189-0B27-11D4-B5F5-9CC767000000}')") , ], 1 , 1 , 4 , 0 , 216 , (3, 0, None, None) , 0 , )),
(( 'IsZone' , 'pVal' , ), 44, (44, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 220 , (3, 0, None, None) , 0 , )),
(( 'Parent' , 'pVal' , ), 46, (46, (), [ (16393, 10, None, "IID('{F2BCF189-0B27-11D4-B5F5-9CC767000000}')") , ], 1 , 2 , 4 , 0 , 224 , (3, 0, None, None) , 0 , )),
(( 'Duplicate' , 'ppInt' , ), 47, (47, (), [ (16393, 10, None, "IID('{F2BCF189-0B27-11D4-B5F5-9CC767000000}')") , ], 1 , 1 , 4 , 0 , 228 , (3, 0, None, None) , 0 , )),
(( 'LogFlags' , 'LogType' , 'pVal' , ), 49, (49, (), [ (3, 1, None, None) ,
(16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 232 , (3, 0, None, None) , 1088 , )),
(( 'LogFlags' , 'LogType' , 'pVal' , ), 49, (49, (), [ (3, 1, None, None) ,
(3, 1, None, None) , ], 1 , 4 , 4 , 0 , 236 , (3, 0, None, None) , 1088 , )),
(( 'LogSignature' , 'pVal' , ), 50, (50, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 240 , (3, 0, None, None) , 1088 , )),
(( 'LogSignature' , 'pVal' , ), 50, (50, (), [ (8, 1, None, None) , ], 1 , 4 , 4 , 0 , 244 , (3, 0, None, None) , 1088 , )),
(( 'Crop' , 'left' , 'top' , 'right' , 'bottom' ,
), 51, (51, (), [ (3, 1, None, None) , (3, 1, None, None) , (3, 1, None, None) , (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 248 , (3, 0, None, None) , 0 , )),
(( 'LineBytes' , 'pVal' , ), 52, (52, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 252 , (3, 0, None, None) , 0 , )),
(( 'BitsPerPixel' , 'pVal' , ), 53, (53, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 256 , (3, 0, None, None) , 0 , )),
(( 'CreateBpp' , 'Width' , 'Height' , 'BitsPerPixel' , ), 54, (54, (), [
(3, 1, None, None) , (3, 1, None, None) , (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 260 , (3, 0, None, None) , 0 , )),
	(( 'Buffer' , 'pVal' , ), 55, (55, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 264 , (3, 0, None,
height, width = matrix.shape[-3:]
row_extent = height - kernel_shape[0] + 1
col_extent = width - kernel_shape[1] + 1
# starting block indices
start_idx = opr.arange(kernel_shape[0])[:, None] * height + np.arange(kernel_shape[1])
start_idx = start_idx.flatten()[None, :]
offset_filter = np.arange(
0, filters * height * width, height * width
).reshape(-1, 1)
start_idx = start_idx + offset_filter
# offsetted indices across the height and width of A
offset_idx = opr.arange(row_extent)[:, None][::strides[0]] * height + opr.arange(0, col_extent, strides[1])
# actual indices
if indices is True:
return start_idx.ravel()[:, None] + offset_idx.ravel()
return opr.take(matrix, start_idx.ravel()[:, None] + offset_idx.ravel())
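# Note: the routine above is an im2col-style index construction: `start_idx`
# enumerates the flat offsets of the elements inside one kernel window (per
# filter/channel), `offset_idx` enumerates the top-left corner of every window
# position given the strides, and their broadcasted sum gives, column by
# column, the flat indices of each patch. Tiny sketch for a 3x3 input with a
# 2x2 kernel and stride 1:
#   start_idx  = [[0, 1], [3, 4]]   # elements of one window
#   offset_idx = [[0, 1], [3, 4]]   # window top-left corners
#   start_idx.ravel()[:, None] + offset_idx.ravel()  # 4x4: columns = patches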
class Relu(Node):
def __init__(
self,
from_node: list,
to_node: list,
input_shape: tuple,
config: Config,
depth=None,
bounds=Bounds(),
id=None
):
"""
Arguments:
from_node:
list of input nodes.
to_node:
list of output nodes.
input_shape:
shape of the input tensor to the node.
config:
configuration.
depth:
the depth of the node.
bounds:
concrete bounds for the node.
"""
super().__init__(
from_node,
to_node,
input_shape,
input_shape,
config,
depth=depth,
bounds=bounds,
id=id
)
self.state = np.array(
[ReluState.UNSTABLE] * self.output_size,
dtype=ReluState,
).reshape(self.output_shape)
self.dep_root = np.array(
[False] * self.output_size,
dtype=bool
).reshape(self.output_shape)
self.active_flag = None
self.inactive_flag = None
self.stable_flag = None
self.unstable_flag = None
self.propagation_flag = None
self.stable_count = None
self.unstable_count = None
self.active_count = None
self.inactive_count = None
self.propagation_count = None
def copy(self):
"""
Copies the node.
"""
relu = Relu(
self.from_node,
self.to_node,
self.input_shape,
self.config,
depth=self.depth,
bounds=self.bounds.copy(),
id=self.id
)
relu.state = self.state.copy()
relu.dep_root = self.dep_root.copy()
return relu
def get_milp_var_size(self):
"""
Returns the number of milp variables required for the milp encoding of
the node.
"""
return 2 * self.output_size
def forward(self, inp: torch.tensor=None, save_output=None) -> torch.tensor:
"""
Computes the output of the node given an input.
Arguments:
inp:
the input.
save_output:
Whether to save the output in the node.
Returns:
the output of the node.
"""
assert inp is not None or self.from_node[0].output is not None
inp = self.from_node[0].output if inp is None else inp
output = torch.clamp(inp, 0, math.inf)
if save_output:
self.output = output
return output
def forward_numpy(self, inp: np.array=None, save_output=None) -> np.array:
"""
Computes the output of the node given a numpy input.
Arguments:
inp:
the input.
save_output:
Whether to save the output in the node.
Returns:
the output of the node.
"""
assert inp is not None or self.from_node[0].output is not None
inp = self.from_node[0].output if inp is None else inp
output = np.clip(inp, 0, math.inf)
if save_output:
self.output = output
return output
def reset_state_flags(self):
"""
Resets calculation flags for relu states
"""
self.active_flag = None
self.inactive_flag = None
self.stable_flag = None
self.unstable_flag = None
self.propagation_flag = None
        self.stable_count = None
self.unstable_count = None
self.active_count = None
self.inactive_count = None
self.propagation_count = None
def is_active(self, index):
"""
        Determines whether a given ReLU node is strictly active.
Arguments:
index:
the index of the node.
Returns:
bool expressing the active state of the given node.
"""
cond1 = self.from_node[0].bounds.lower[index] >= 0
cond2 = self.state[index] == ReluState.ACTIVE
return cond1 or cond2
def is_inactive(self, index: tuple):
"""
Determines whether a given ReLU node is strictly inactive.
Arguments:
index:
the index of the node.
Returns:
bool expressing the inactive state of the given node.
"""
cond1 = self.from_node[0].bounds.upper[index] <= 0
cond2 = self.state[index] == ReluState.INACTIVE
return cond1 or cond2
def is_stable(self, index: tuple, delta_val: float=None):
"""
Determines whether a given ReLU node is stable.
Arguments:
index:
the index of the node.
delta_val:
the value of the binary variable associated with the node. if
set, the value is also used in conjunction with the node's
bounds to determined its stability.
Returns:
bool expressing the stability of the given node.
"""
cond0a = self.from_node[0].bounds.lower[index].item() >= 0
cond0b = self.from_node[0].bounds.upper[index].item() <= 0
cond1 = cond0a or cond0b
cond2 = self.state[index] != ReluState.UNSTABLE
cond3 = False if delta_val is None else delta_val in [0, 1]
return cond1 or cond2 or cond3
def get_active_flag(self) -> torch.tensor:
"""
Returns an array of activity statuses for each ReLU node.
"""
if self.active_flag is None:
self.active_flag = self.from_node[0].bounds.lower.flatten() > 0
return self.active_flag
def get_active_count(self) -> int:
"""
Returns the total number of active Relu nodes.
"""
if self.active_count is None:
self.active_count = torch.sum(self.get_active_flag())
return self.active_count
def get_inactive_flag(self) -> torch.tensor:
"""
Returns an array of inactivity statuses for each ReLU node.
"""
if self.inactive_flag is None:
self.inactive_flag = self.from_node[0].bounds.upper.flatten() <= 0
return self.inactive_flag
def get_inactive_count(self) -> int:
"""
Returns the total number of inactive ReLU nodes.
"""
if self.inactive_count is None:
self.inactive_count = torch.sum(self.get_inactive_flag())
return self.inactive_count
def get_unstable_flag(self) -> torch.tensor:
"""
Returns an array of instability statuses for each ReLU node.
"""
if self.unstable_flag is None:
self.unstable_flag = torch.logical_and(
self.from_node[0].bounds.lower < 0,
self.from_node[0].bounds.upper > 0
).flatten()
return self.unstable_flag
def get_unstable_count(self) -> int:
"""
Returns the total number of unstable nodes.
"""
if self.unstable_count is None:
self.unstable_count = torch.sum(self.get_unstable_flag())
return self.unstable_count
def get_stable_flag(self) -> torch.tensor:
"""
Returns an array of instability statuses for each ReLU node.
"""
if self.stable_flag is None:
self.stable_flag = torch.logical_or(
self.get_active_flag(),
self.get_inactive_flag()
).flatten()
return self.stable_flag
def get_stable_count(self) -> int:
"""
Returns the total number of unstable nodes.
"""
if self.stable_count is None:
self.stable_count = torch.sum(self.get_stable_flag()).item()
return self.stable_count
def get_propagation_flag(self) -> torch.tensor:
"""
Returns an array of sip propagation statuses for each node.
"""
if self.propagation_flag is None:
self.propagation_flag = torch.logical_or(
self.get_active_flag(),
self.get_unstable_flag()
).flatten()
return self.propagation_flag
def get_propagation_count(self) -> int:
"""
Returns the total number of sip propagation nodes.
"""
if self.propagation_count is None:
self.propagation_count = torch.sum(self.get_propagation_flag())
return self.propagation_count
def get_upper_relaxation_slope(self) -> torch.tensor:
"""
Returns:
The upper relaxation slope for each of the ReLU nodes.
"""
slope = torch.zeros(
self.output_size, dtype=self.config.PRECISION, device=self.config.DEVICE
)
upper = self.from_node[0].bounds.upper.flatten()[self.get_unstable_flag()]
lower = self.from_node[0].bounds.lower.flatten()[self.get_unstable_flag()]
slope[self.get_unstable_flag()] = upper / (upper - lower)
slope[self.get_active_flag()] = 1.0
return slope
def get_lower_relaxation_slope(self):
"""
Returns:
The upper relaxation slope for each of the ReLU nodes.
"""
slope = torch.ones(
self.output_size, dtype=self.config.PRECISION, device=self.config.DEVICE
)
upper = self.from_node[0].bounds.upper.flatten()
lower = self.from_node[0].bounds.lower.flatten()
idxs = abs(lower) >= upper
slope[idxs] = 0.0
slope[self.get_inactive_flag()] = 0.0
slope[self.get_active_flag()] = 1.0
return slope
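# Note: the two slope getters above correspond to the standard triangle
# relaxation of an unstable ReLU with input bounds l < 0 < u:
#     upper:  y <= u / (u - l) * (x - l)   (only the slope u/(u-l) is returned
#                                           by get_upper_relaxation_slope)
#     lower:  y >= a * x, a in {0, 1}      (a = 0 when |l| >= u, else 1)
# Stable nodes get slope 1 (active) or 0 (inactive), as in the code above.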
class Reshape(Node):
def __init__(
self,
from_node: list,
to_node: list,
input_shape: tuple,
output_shape: tuple,
config: Config,
depth=None,
bounds=Bounds(),
id=None
):
"""
Arguments:
from_node:
list of input nodes.
to_node:
list of output nodes.
input_shape:
shape of the input tensor to the node.
output_shape:
shape of the output tensor to the node.
config:
configuration.
depth:
the depth of the node.
bounds:
concrete bounds for the node.
"""
super().__init__(
from_node,
to_node,
input_shape,
output_shape,
config,
            depth=depth,
            bounds=bounds,
id=id
)
def copy(self):
"""
Copies the node.
"""
return Reshape(
self.from_node,
self.to_node,
self.input_shape,
self.output_shape,
self.config,
depth=self.depth,
bounds=self.bounds.copy(),
id=self.id
)
def get_milp_var_size(self):
"""
Returns the number of milp variables required for the milp encoding of
the node.
"""
return 0
class Flatten(Node):
def __init__(
self,
from_node: list,
to_node: list,
input_shape: tuple,
config: Config,
depth=None,
bounds=Bounds(),
id=None
):
"""
Arguments:
from_node:
list of input nodes.
to_node:
list of output nodes.
input_shape:
shape of the input tensor to the node.
config:
configuration.
depth:
the depth of the node.
bounds:
concrete bounds for the node.
"""
super().__init__(
from_node,
to_node,
input_shape,
(np.prod(input_shape),),
config,
depth=depth,
bounds=bounds,
id=id
)
def copy(self):
"""
Copies the node.
"""
return Flatten(
self.from_node,
self.to_node,
self.input_shape,
self.config,
depth=self.depth,
bounds=self.bounds.copy(),
id=self.id
)
def get_milp_var_size(self):
"""
Returns the number of milp variables required for the milp encoding of
the node.
"""
return 0
def forward(self, inp: torch.tensor=None, save_output=False) -> torch.tensor:
"""
Computes the output of the node given an input.
Arguments:
inp:
the input.
save_output:
Whether to save the output in the node.
Returns:
the output of the node.
"""
assert inp is not None or self.from_node[0].output is not None
inp = self.from_node[0].output if inp is None else inp
output = inp.flatten()
if save_output:
self.output = output
return output
def forward_numpy(self, inp: np.ndarray=None, save_output=False) -> np.ndarray:
"""
Computes the output of the node given a numpy input.
Arguments:
inp:
the input.
save_output:
Whether to save the output in the node.
Returns:
the output of the node.
"""
        assert
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-03-12 15:57
from __future__ import unicode_literals
import core.model_fields
import core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailimages', '0021_image_file_hash'),
('export_readiness', '0032_auto_20190307_1512'),
]
operations = [
migrations.RenameField(
model_name='countryguidepage',
old_name='landing_page_title',
new_name='heading',
),
migrations.RenameField(
model_name='countryguidepage',
old_name='related_content_heading',
new_name='statistic_1_heading',
),
migrations.RenameField(
model_name='countryguidepage',
old_name='selling_point_one_heading',
new_name='statistic_1_number',
),
migrations.RemoveField(
model_name='countryguidepage',
name='related_content_intro',
),
migrations.RemoveField(
model_name='countryguidepage',
name='section_one_content',
),
migrations.RemoveField(
model_name='countryguidepage',
name='section_one_heading',
),
migrations.RemoveField(
model_name='countryguidepage',
name='section_two_content',
),
migrations.RemoveField(
model_name='countryguidepage',
name='selling_point_one_content',
),
migrations.RemoveField(
model_name='countryguidepage',
name='selling_point_one_icon',
),
migrations.RemoveField(
model_name='countryguidepage',
name='selling_point_three_content',
),
migrations.RemoveField(
model_name='countryguidepage',
name='selling_point_three_heading',
),
migrations.RemoveField(
model_name='countryguidepage',
name='selling_point_three_icon',
),
migrations.RemoveField(
model_name='countryguidepage',
name='selling_point_two_content',
),
migrations.RemoveField(
model_name='countryguidepage',
name='selling_point_two_heading',
),
migrations.RemoveField(
model_name='countryguidepage',
name='selling_point_two_icon',
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_hero_image',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='Accordion hero'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_icon',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='Accordion Icon'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_statistic_1_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 1 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_statistic_1_number',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 1 number'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_statistic_1_smallprint',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 1 smallprint'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_statistic_2_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 2 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_statistic_2_number',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 2 number'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_statistic_2_smallprint',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 2 smallprint'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_statistic_3_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 3 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_statistic_3_number',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 3 number'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_statistic_3_smallprint',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 3 smallprint'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_statistic_4_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 4 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_statistic_4_number',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 4 number'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_statistic_4_smallprint',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 4 smallprint'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_statistic_5_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 5 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_statistic_5_number',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 5 number'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_statistic_5_smallprint',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 5 smallprint'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_statistic_6_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 6 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_statistic_6_number',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 6 number'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_statistic_6_smallprint',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 6 smallprint'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_subsection_1_body',
field=models.TextField(blank=True, verbose_name='Subsection 1 body'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_subsection_1_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Subsection 1 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_subsection_1_icon',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='Subsection 1 icon'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_subsection_2_body',
field=models.TextField(blank=True, verbose_name='Subsection 2 body'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_subsection_2_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Subsection 2 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_subsection_2_icon',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='Subsection 2 icon'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_subsection_3_body',
field=models.TextField(blank=True, verbose_name='Subsection 2 body'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_subsection_3_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Subsection 2 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_subsection_3_icon',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='Subsection 2 icon'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_teaser',
field=models.TextField(blank=True, verbose_name='Accordion teaser'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_1_title',
field=models.CharField(blank=True, max_length=255, verbose_name='Accordion title'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_hero_image',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='Accordion hero'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_icon',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='Accordion Icon'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_statistic_1_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 1 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_statistic_1_number',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 1 number'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_statistic_1_smallprint',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 1 smallprint'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_statistic_2_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 2 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_statistic_2_number',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 2 number'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_statistic_2_smallprint',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 2 smallprint'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_statistic_3_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 3 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_statistic_3_number',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 3 number'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_statistic_3_smallprint',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 3 smallprint'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_statistic_4_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 4 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_statistic_4_number',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 4 number'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_statistic_4_smallprint',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 4 smallprint'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_statistic_5_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 5 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_statistic_5_number',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 5 number'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_statistic_5_smallprint',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 5 smallprint'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_statistic_6_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 6 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_statistic_6_number',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 6 number'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_statistic_6_smallprint',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 6 smallprint'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_subsection_1_body',
field=models.TextField(blank=True, verbose_name='Subsection 1 body'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_subsection_1_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Subsection 1 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_subsection_1_icon',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='Subsection 1 icon'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_subsection_2_body',
field=models.TextField(blank=True, verbose_name='Subsection 2 body'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_subsection_2_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Subsection 2 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_subsection_2_icon',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='Subsection 2 icon'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_subsection_3_body',
field=models.TextField(blank=True, verbose_name='Subsection 2 body'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_subsection_3_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Subsection 2 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_subsection_3_icon',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='Subsection 2 icon'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_teaser',
field=models.TextField(blank=True, verbose_name='Accordion teaser'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_2_title',
field=models.CharField(blank=True, max_length=255, verbose_name='Accordion title'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_hero_image',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='Accordion hero'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_icon',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='Accordion Icon'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_statistic_1_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 1 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_statistic_1_number',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 1 number'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_statistic_1_smallprint',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 1 smallprint'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_statistic_2_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 2 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_statistic_2_number',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 2 number'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_statistic_2_smallprint',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 2 smallprint'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_statistic_3_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 3 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_statistic_3_number',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 3 number'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_statistic_3_smallprint',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 3 smallprint'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_statistic_4_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 4 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_statistic_4_number',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 4 number'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_statistic_4_smallprint',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 4 smallprint'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_statistic_5_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 5 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_statistic_5_number',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 5 number'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_statistic_5_smallprint',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 5 smallprint'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_statistic_6_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 6 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_statistic_6_number',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 6 number'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_statistic_6_smallprint',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 6 smallprint'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_subsection_1_body',
field=models.TextField(blank=True, verbose_name='Subsection 1 body'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_subsection_1_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Subsection 1 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_subsection_1_icon',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='Subsection 1 icon'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_subsection_2_body',
field=models.TextField(blank=True, verbose_name='Subsection 2 body'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_subsection_2_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Subsection 2 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_subsection_2_icon',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='Subsection 2 icon'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_subsection_3_body',
field=models.TextField(blank=True, verbose_name='Subsection 3 body'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_subsection_3_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Subsection 3 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_subsection_3_icon',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='Subsection 3 icon'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_teaser',
field=models.TextField(blank=True, verbose_name='Accordion teaser'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_3_title',
field=models.CharField(blank=True, max_length=255, verbose_name='Accordion title'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_hero_image',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='Accordion hero'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_icon',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='Accordion Icon'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_statistic_1_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 1 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_statistic_1_number',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 1 number'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_statistic_1_smallprint',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 1 smallprint'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_statistic_2_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 2 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_statistic_2_number',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 2 number'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_statistic_2_smallprint',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 2 smallprint'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_statistic_3_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 3 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_statistic_3_number',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 3 number'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_statistic_3_smallprint',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 3 smallprint'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_statistic_4_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 4 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_statistic_4_number',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 4 number'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_statistic_4_smallprint',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 4 smallprint'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_statistic_5_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 5 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_statistic_5_number',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 5 number'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_statistic_5_smallprint',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 5 smallprint'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_statistic_6_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 6 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_statistic_6_number',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 6 number'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_statistic_6_smallprint',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 6 smallprint'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_subsection_1_body',
field=models.TextField(blank=True, verbose_name='Subsection 1 body'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_subsection_1_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Subsection 1 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_subsection_1_icon',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='Subsection 1 icon'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_subsection_2_body',
field=models.TextField(blank=True, verbose_name='Subsection 2 body'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_subsection_2_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Subsection 2 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_subsection_2_icon',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='Subsection 2 icon'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_subsection_3_body',
field=models.TextField(blank=True, verbose_name='Subsection 3 body'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_subsection_3_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Subsection 3 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_subsection_3_icon',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='Subsection 3 icon'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_teaser',
field=models.TextField(blank=True, verbose_name='Accordion teaser'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_4_title',
field=models.CharField(blank=True, max_length=255, verbose_name='Accordion title'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_5_hero_image',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='Accordion hero'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_5_icon',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='Accordion Icon'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_5_statistic_1_heading',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 1 heading'),
),
migrations.AddField(
model_name='countryguidepage',
name='accordion_5_statistic_1_number',
field=models.CharField(blank=True, max_length=255, verbose_name='Stat 1 number'),
),
scale3b2_branch2b = bn3b2_branch2b
res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b , act_type='relu')
res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu , num_filter=512, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3b2_branch2c = bn3b2_branch2c
res3b2 = mx.symbol.broadcast_add(name='res3b2', *[res3b1_relu,scale3b2_branch2c] )
res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2 , act_type='relu')
res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu , num_filter=128, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3b3_branch2a = bn3b3_branch2a
res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a , act_type='relu')
res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu , num_filter=128, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3b3_branch2b = bn3b3_branch2b
res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b , act_type='relu')
res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu , num_filter=512, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3b3_branch2c = bn3b3_branch2c
res3b3 = mx.symbol.broadcast_add(name='res3b3', *[res3b2_relu,scale3b3_branch2c] )
res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3 , act_type='relu')
res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(2,2), no_bias=True)
bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1 , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4a_branch1 = bn4a_branch1
res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(2,2), no_bias=True)
bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4a_branch2a = bn4a_branch2a
res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a , act_type='relu')
res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4a_branch2b = bn4a_branch2b
res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b , act_type='relu')
res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4a_branch2c = bn4a_branch2c
res4a = mx.symbol.broadcast_add(name='res4a', *[scale4a_branch1,scale4a_branch2c] )
res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a , act_type='relu')
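# Each residual unit above follows the same bottleneck pattern:
# 1x1 conv (reduce channels) -> BN -> ReLU -> 3x3 conv -> BN -> ReLU ->
# 1x1 conv (restore channels) -> BN, then broadcast_add with the shortcut
# (an identity for the b-units, or the projection branch1 when the spatial
# size / channel count changes, as in res4a above), followed by a final ReLU.
# The res4b1, res4b2, ... blocks below differ only in their input symbol and
# name prefix, so the per-unit wiring repeats verbatim. A helper of the
# following shape could generate each unit (sketch only; this file spells
# every unit out explicitly):
#
#   def bottleneck(data, prefix, num_mid, num_out, stride=(1, 1), shortcut=None):
#       ...  # conv/BN/ReLU chain as above, returning the post-ReLU symbol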
res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b1_branch2a = bn4b1_branch2a
res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a , act_type='relu')
res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b1_branch2b = bn4b1_branch2b
res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b , act_type='relu')
res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b1_branch2c = bn4b1_branch2c
res4b1 = mx.symbol.broadcast_add(name='res4b1', *[res4a_relu,scale4b1_branch2c] )
res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1 , act_type='relu')
res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b2_branch2a = bn4b2_branch2a
res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a , act_type='relu')
res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b2_branch2b = bn4b2_branch2b
res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b , act_type='relu')
res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b2_branch2c = bn4b2_branch2c
res4b2 = mx.symbol.broadcast_add(name='res4b2', *[res4b1_relu,scale4b2_branch2c] )
res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2 , act_type='relu')
res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b3_branch2a = bn4b3_branch2a
res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a , act_type='relu')
res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b3_branch2b = bn4b3_branch2b
res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b , act_type='relu')
res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b3_branch2c = bn4b3_branch2c
res4b3 = mx.symbol.broadcast_add(name='res4b3', *[res4b2_relu,scale4b3_branch2c] )
res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3 , act_type='relu')
res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b4_branch2a = bn4b4_branch2a
res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a , act_type='relu')
res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b4_branch2b = bn4b4_branch2b
res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b , act_type='relu')
res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b4_branch2c = bn4b4_branch2c
res4b4 = mx.symbol.broadcast_add(name='res4b4', *[res4b3_relu,scale4b4_branch2c] )
res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4 , act_type='relu')
res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b5_branch2a = bn4b5_branch2a
res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a , act_type='relu')
res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b5_branch2b = bn4b5_branch2b
res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b , act_type='relu')
res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b5_branch2c = bn4b5_branch2c
res4b5 = mx.symbol.broadcast_add(name='res4b5', *[res4b4_relu,scale4b5_branch2c] )
res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5 , act_type='relu')
res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b6_branch2a = bn4b6_branch2a
res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a , act_type='relu')
res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b6_branch2b = bn4b6_branch2b
res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b , act_type='relu')
res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b6_branch2c = bn4b6_branch2c
res4b6 = mx.symbol.broadcast_add(name='res4b6', *[res4b5_relu,scale4b6_branch2c] )
res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6 , act_type='relu')
res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b7_branch2a = bn4b7_branch2a
res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=scale4b7_branch2a , act_type='relu')
res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b7_branch2b = bn4b7_branch2b
res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b , act_type='relu')
res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b7_branch2c = bn4b7_branch2c
res4b7 = mx.symbol.broadcast_add(name='res4b7', *[res4b6_relu,scale4b7_branch2c] )
res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7 , act_type='relu')
res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b8_branch2a = bn4b8_branch2a
res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a , act_type='relu')
res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b8_branch2b = bn4b8_branch2b
res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b , act_type='relu')
res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b8_branch2c = bn4b8_branch2c
res4b8 = mx.symbol.broadcast_add(name='res4b8', *[res4b7_relu,scale4b8_branch2c] )
res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8 , act_type='relu')
res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b9_branch2a = bn4b9_branch2a
res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a , act_type='relu')
res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b9_branch2b = bn4b9_branch2b
res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b , act_type='relu')
res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b9_branch2c = bn4b9_branch2c
res4b9 = mx.symbol.broadcast_add(name='res4b9', *[res4b8_relu,scale4b9_branch2c] )
res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9 , act_type='relu')
res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b10_branch2a = bn4b10_branch2a
res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a , act_type='relu')
res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b10_branch2b = bn4b10_branch2b
res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b , act_type='relu')
res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b10_branch2c = bn4b10_branch2c
res4b10 = mx.symbol.broadcast_add(name='res4b10', *[res4b9_relu,scale4b10_branch2c] )
res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10 , act_type='relu')
res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b11_branch2a = bn4b11_branch2a
res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a , act_type='relu')
res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b11_branch2b = bn4b11_branch2b
res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b , act_type='relu')
res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b11_branch2c = bn4b11_branch2c
res4b11 = mx.symbol.broadcast_add(name='res4b11', *[res4b10_relu,scale4b11_branch2c] )
res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11 , act_type='relu')
res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b12_branch2a = bn4b12_branch2a
res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a , act_type='relu')
res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b12_branch2b = bn4b12_branch2b
res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b , act_type='relu')
res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b12_branch2c = bn4b12_branch2c
res4b12 = mx.symbol.broadcast_add(name='res4b12', *[res4b11_relu,scale4b12_branch2c] )
res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12 , act_type='relu')
res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
"""Outbound transport manager."""
import asyncio
import json
import logging
import time
from typing import Callable, Type, Union
from urllib.parse import urlparse
from ...connections.models.connection_target import ConnectionTarget
from ...config.injection_context import InjectionContext
from ...core.profile import Profile
from ...utils.classloader import ClassLoader, ModuleLoadError, ClassNotFoundError
from ...utils.stats import Collector
from ...utils.task_queue import CompletedTask, TaskQueue, task_exc_info
from ...utils.tracing import trace_event, get_timer
from ..wire_format import BaseWireFormat
from .base import (
BaseOutboundTransport,
OutboundDeliveryError,
OutboundTransportRegistrationError,
)
from .message import OutboundMessage
LOGGER = logging.getLogger(__name__)
MODULE_BASE_PATH = "aries_cloudagent.transport.outbound"
class QueuedOutboundMessage:
"""Class representing an outbound message pending delivery."""
STATE_NEW = "new"
STATE_PENDING = "pending"
STATE_ENCODE = "encode"
STATE_DELIVER = "deliver"
STATE_RETRY = "retry"
STATE_DONE = "done"
def __init__(
self,
profile: Profile,
message: OutboundMessage,
target: ConnectionTarget,
transport_id: str,
):
"""Initialize the queued outbound message."""
self.profile = profile
self.endpoint = target and target.endpoint
self.error: Exception = None
self.message = message
self.payload: Union[str, bytes] = None
self.retries = None
self.retry_at: float = None
self.state = self.STATE_NEW
self.target = target
self.task: asyncio.Task = None
self.transport_id: str = transport_id
self.metadata: dict = None
self.api_key: str = None
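# A QueuedOutboundMessage moves through the STATE_* values roughly as follows:
# NEW -> ENCODE (wire-format encoding), or straight to PENDING when the
# message already carries an encoded payload; then PENDING -> DELIVER, and on
# failure DELIVER -> RETRY (until retries are exhausted) -> DONE. The
# transitions are driven by OutboundTransportManager._process_loop below.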
class OutboundTransportManager:
"""Outbound transport manager class."""
MAX_RETRY_COUNT = 4
def __init__(
self, context: InjectionContext, handle_not_delivered: Callable = None
):
"""
Initialize a `OutboundTransportManager` instance.
Args:
context: The application context
handle_not_delivered: An optional handler for undelivered messages
"""
self.context = context
self.loop = asyncio.get_event_loop()
self.handle_not_delivered = handle_not_delivered
self.outbound_buffer = []
self.outbound_event = asyncio.Event()
self.outbound_new = []
self.registered_schemes = {}
self.registered_transports = {}
self.running_transports = {}
self.task_queue = TaskQueue(max_active=200)
self._process_task: asyncio.Task = None
if self.context.settings.get("transport.max_outbound_retry"):
self.MAX_RETRY_COUNT = self.context.settings["transport.max_outbound_retry"]
async def setup(self):
"""Perform setup operations."""
outbound_transports = (
self.context.settings.get("transport.outbound_configs") or []
)
for outbound_transport in outbound_transports:
self.register(outbound_transport)
def register(self, module: str) -> str:
"""
Register a new outbound transport by module path.
Args:
module: Module name to register
Raises:
OutboundTransportRegistrationError: If the imported class cannot
be located
OutboundTransportRegistrationError: If the imported class does not
specify a schemes attribute
OutboundTransportRegistrationError: If the scheme has already been
registered
"""
try:
imported_class = ClassLoader.load_subclass_of(
BaseOutboundTransport, module, MODULE_BASE_PATH
)
except (ModuleLoadError, ClassNotFoundError):
raise OutboundTransportRegistrationError(
f"Outbound transport module {module} could not be resolved."
)
return self.register_class(imported_class)
def register_class(
self, transport_class: Type[BaseOutboundTransport], transport_id: str = None
) -> str:
"""
Register a new outbound transport class.
Args:
transport_class: Transport class to register
Raises:
OutboundTransportRegistrationError: If the imported class does not
specify a schemes attribute
OutboundTransportRegistrationError: If the scheme has already been
registered
"""
try:
schemes = transport_class.schemes
except AttributeError:
raise OutboundTransportRegistrationError(
f"Imported class {transport_class} does not "
+ "specify a required 'schemes' attribute"
)
if not transport_id:
transport_id = transport_class.__qualname__
for scheme in schemes:
if scheme in self.registered_schemes:
# A scheme can only be registered once
raise OutboundTransportRegistrationError(
f"Cannot register transport '{transport_id}'"
f"for '{scheme}' scheme because the scheme"
"has already been registered"
)
self.registered_transports[transport_id] = transport_class
for scheme in schemes:
self.registered_schemes[scheme] = transport_id
return transport_id
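# Typical usage (sketch): transports are usually registered by short module
# name via setup()/register(), e.g. register("http") resolves a
# BaseOutboundTransport subclass under aries_cloudagent.transport.outbound and
# maps each of its schemes (e.g. "http", "https") to the returned
# transport_id; register_class() can also be called directly with a class.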
async def start_transport(self, transport_id: str):
"""Start a registered transport."""
transport = self.registered_transports[transport_id]()
transport.collector = self.context.inject_or(Collector)
await transport.start()
self.running_transports[transport_id] = transport
async def start(self):
"""Start all transports and feed messages from the queue."""
for transport_id in self.registered_transports:
self.task_queue.run(self.start_transport(transport_id))
async def stop(self, wait: bool = True):
"""Stop all running transports."""
if self._process_task and not self._process_task.done():
self._process_task.cancel()
await self.task_queue.complete(None if wait else 0)
for transport in self.running_transports.values():
await transport.stop()
self.running_transports = {}
def get_registered_transport_for_scheme(self, scheme: str) -> str:
"""Find the registered transport ID for a given scheme."""
try:
return next(
transport_id
for transport_id, transport in self.registered_transports.items()
if scheme in transport.schemes
)
except StopIteration:
pass
def get_running_transport_for_scheme(self, scheme: str) -> str:
"""Find the running transport ID for a given scheme."""
try:
return next(
transport_id
for transport_id, transport in self.running_transports.items()
if scheme in transport.schemes
)
except StopIteration:
pass
def get_running_transport_for_endpoint(self, endpoint: str):
"""Find the running transport ID to use for a given endpoint."""
# Grab the scheme from the uri
scheme = urlparse(endpoint).scheme
if scheme == "":
raise OutboundDeliveryError(
f"The uri '{endpoint}' does not specify a scheme"
)
# Look up transport that is registered to handle this scheme
transport_id = self.get_running_transport_for_scheme(scheme)
if not transport_id:
raise OutboundDeliveryError(
f"No transport driver exists to handle scheme '{scheme}'"
)
return transport_id
def get_transport_instance(self, transport_id: str) -> BaseOutboundTransport:
"""Get an instance of a running transport by ID."""
return self.running_transports[transport_id]
def enqueue_message(self, profile: Profile, outbound: OutboundMessage):
"""
Add an outbound message to the queue.
Args:
profile: The active profile for the request
outbound: The outbound message to deliver
"""
targets = [outbound.target] if outbound.target else (outbound.target_list or [])
transport_id = None
for target in targets:
endpoint = target.endpoint
try:
transport_id = self.get_running_transport_for_endpoint(endpoint)
except OutboundDeliveryError:
pass
if transport_id:
break
if not transport_id:
raise OutboundDeliveryError("No supported transport for outbound message")
queued = QueuedOutboundMessage(profile, outbound, target, transport_id)
queued.retries = self.MAX_RETRY_COUNT
self.outbound_new.append(queued)
self.process_queued()
def enqueue_webhook(
self,
topic: str,
payload: dict,
endpoint: str,
max_attempts: int = None,
metadata: dict = None,
):
"""
Add a webhook to the queue.
Args:
topic: The webhook topic
payload: The webhook payload
endpoint: The webhook endpoint
max_attempts: Override the maximum number of attempts
metadata: Additional metadata associated with the payload
Raises:
OutboundDeliveryError: if the associated transport is not running
"""
transport_id = self.get_running_transport_for_endpoint(endpoint)
queued = QueuedOutboundMessage(None, None, None, transport_id)
if len(endpoint.split("#")) > 1:
endpoint_hash_split = endpoint.split("#")
endpoint = endpoint_hash_split[0]
api_key = endpoint_hash_split[1]
queued.api_key = api_key
queued.endpoint = f"{endpoint}/topic/{topic}/"
queued.metadata = metadata
queued.payload = json.dumps(payload)
queued.state = QueuedOutboundMessage.STATE_PENDING
queued.retries = 4 if max_attempts is None else max_attempts - 1
self.outbound_new.append(queued)
self.process_queued()
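# Webhook endpoints may carry an API key after a '#' fragment, e.g.
# "https://example.com/hooks#secret-key" (hypothetical URL): the fragment is
# stripped off above and stored on the queued message as api_key, and the
# JSON payload is delivered to "<endpoint>/topic/<topic>/" by the selected
# transport.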
def process_queued(self) -> asyncio.Task:
"""
Start the process to deliver queued messages if necessary.
Returns: the current queue processing task or None
"""
if self._process_task and not self._process_task.done():
self.outbound_event.set()
elif self.outbound_new or self.outbound_buffer:
self._process_task = self.loop.create_task(self._process_loop())
self._process_task.add_done_callback(lambda task: self._process_done(task))
return self._process_task
def _process_done(self, task: asyncio.Task):
"""Handle completion of the drain process."""
exc_info = task_exc_info(task)
if exc_info:
LOGGER.exception(
"Exception in outbound queue processing:", exc_info=exc_info
)
if self._process_task and self._process_task.done():
self._process_task = None
async def _process_loop(self):
"""Continually kick off encoding and delivery on outbound messages."""
# Note: this method should not call async methods apart from
# waiting for the updated event, to avoid yielding to other queue methods
while True:
self.outbound_event.clear()
loop_time = get_timer()
upd_buffer = []
retry_count = 0
for queued in self.outbound_buffer:
if queued.state == QueuedOutboundMessage.STATE_DONE:
if queued.error:
LOGGER.exception(
"Outbound message could not be delivered to %s",
queued.endpoint,
exc_info=queued.error,
)
if self.handle_not_delivered and queued.message:
self.handle_not_delivered(queued.profile, queued.message)
continue # remove from buffer
deliver = False
if queued.state == QueuedOutboundMessage.STATE_PENDING:
deliver = True
elif queued.state == QueuedOutboundMessage.STATE_RETRY:
if queued.retry_at < loop_time:
queued.retry_at = None
deliver = True
else:
retry_count += 1
if deliver:
queued.state = QueuedOutboundMessage.STATE_DELIVER
p_time = trace_event(
self.context.settings,
queued.message if queued.message else queued.payload,
outcome="OutboundTransportManager.DELIVER.START."
+ queued.endpoint,
)
self.deliver_queued_message(queued)
trace_event(
self.context.settings,
queued.message if queued.message else queued.payload,
outcome="OutboundTransportManager.DELIVER.END."
+ queued.endpoint,
perf_counter=p_time,
)
upd_buffer.append(queued)
new_pending = 0
new_messages = self.outbound_new
self.outbound_new = []
for queued in new_messages:
if queued.state == QueuedOutboundMessage.STATE_NEW:
if queued.message and queued.message.enc_payload:
queued.payload = queued.message.enc_payload
queued.state = QueuedOutboundMessage.STATE_PENDING
new_pending += 1
else:
queued.state = QueuedOutboundMessage.STATE_ENCODE
p_time = trace_event(
self.context.settings,
queued.message if queued.message else queued.payload,
outcome="OutboundTransportManager.ENCODE.START",
)
self.encode_queued_message(queued)
trace_event(
self.context.settings,
queued.message if queued.message else queued.payload,
outcome="OutboundTransportManager.ENCODE.END",
perf_counter=p_time,
)
else:
new_pending += 1
upd_buffer.append(queued)
self.outbound_buffer = upd_buffer
if self.outbound_buffer:
if (not new_pending) and (not retry_count):
await self.outbound_event.wait()
elif retry_count:
# only retries - yield here so we don't hog resources
await asyncio.sleep(0.05)
else:
break
def encode_queued_message(self, queued: QueuedOutboundMessage) -> asyncio.Task:
"""Kick off encoding of a queued message."""
queued.task = self.task_queue.run(
self.perform_encode(queued),
lambda completed: self.finished_encode(queued, completed),
)
return queued.task
async def perform_encode(self, queued: QueuedOutboundMessage):
"""Perform message encoding."""
transport = self.get_transport_instance(queued.transport_id)
wire_format = transport.wire_format or self.context.inject(BaseWireFormat)
session = await queued.profile.session()
queued.payload = await wire_format.encode_message(
session,
queued.message.payload,
queued.target.recipient_keys,
queued.target.routing_keys,
queued.target.sender_key,
)
def finished_encode(self, queued: QueuedOutboundMessage, completed: CompletedTask):
"""Handle completion of queued message encoding."""
if completed.exc_info:
queued.error = completed.exc_info
queued.state = QueuedOutboundMessage.STATE_DONE
else:
queued.state = QueuedOutboundMessage.STATE_PENDING
queued.task = None
self.process_queued()
def deliver_queued_message(self, queued: QueuedOutboundMessage) -> asyncio.Task:
"""Kick off delivery of a queued message."""
transport = self.get_transport_instance(queued.transport_id)
queued.task = self.task_queue.run(
transport.handle_message(
queued.profile,
queued.payload,
queued.endpoint,
queued.metadata,
queued.api_key,
),
lambda completed: self.finished_deliver(queued, completed),
)
return queued.task
def finished_deliver(self, queued: QueuedOutboundMessage, completed: CompletedTask):
"""Handle completion of queued message delivery."""
if completed.exc_info:
queued.error = completed.exc_info
#!/usr/bin/env python3
import configparser
from opcua import Client
from opcua import ua
import paho.mqtt.client as mqtt
import json
import logging
import sys
import time
from pysnmp.hlapi import *
import shelve
## TODO: Add Sphinx
## TODO: Add secure login methods
# Handler class for OPC/UA events
class SubHandler(object):
"""
Subscription Handler. To receive events from server for a subscription
data_change and event methods are called directly from receiving thread.
Do not do expensive, slow or network operation there. Create another
thread if you need to do such a thing
"""
def __init__(self):
# Singleton instance of the main control class
self.control = Control()
# Dict of status nodes -> remembers the last value to decide
self.nodes = {}
# Check if PLC workload is running
def checkProcess(self,node,val):
if val == 0:
# Process has stopped => reset/slow down the polling interval
self.control.resetPollInterval(self)
else:
# Process has started => change/speed up the polling interval
self.control.changePollInterval()
# Datachange event from the OPC/UA server
def datachange_notification(self, node, val, data):
#debug example: print("OPC/UA: New data change event", node, val,type(data),data)
if node in self.nodes:
# Check control value
self.checkProcess(node,val)
else:
# Create a first node
self.nodes[node] = val
self.checkProcess(node,val)
# OpcClient class to handle all OPC/UA communication
class OpcClient:
def __init__(self, opc_url, variables, settings, persistency, history_length):
# OPC/UA server url
self.opc_url = opc_url
# OPC/UA variables addresses
self.variables = variables
# OPC/UA variables config parameters
self.settings = settings
# subscription objects
self.handlers = {}
self.subscription = None
# OPC/UA connection from client to a server
self.client = None
# Local registers
self.registers = {}
# State flag
self.init = True
# Persistency flag
self.persistency = persistency
# File backing the shelve-based persistent store (file name assumed here;
# referenced below by storeData()/getStoredData() as self.persist_data)
self.persist_data = "opc_persist_data"
# History length allocation
self.history_length = int(history_length)
# Create session to the OPC/UA server
def login(self):
# Init local registers
for key, val in self.variables.items():
self.registers[key] = {}
self.registers[key]["min"] = None
self.registers[key]["max"] = None
self.registers[key]["register_timestamp"] = None
# Create session
try:
self.client = Client(self.opc_url)
self.client.connect()
except Exception as e:
raise Exception("OPC/UA server is not available. Please check connectivity by cmd tools")
logging.info("Client connected to a OPC/UA server" + str(self.opc_url))
# Logout from the OPC/UA server
def logout(self):
try:
self.client.disconnect()
except Exception as e:
raise Exception("OPC/UA server is not available for logout command. Please check connectivity by cmd tools")
logging.info("Logout form OPC/UA server")
# Clear value of local registers
def clearRegister(self, name):
self.registers[name]["min"] = None
self.registers[name]["max"] = None
self.registers[name]["register_timestamp"] = None
# Store data persistently
def storeData(self,data,key):
pd = shelve.open(self.persist_data)
try:
tmp_value = data["value"]
old_persist_value = pd[key]["value"]
# Check length of stored data
if len(old_persist_value) <= self.history_length:
old_persist_value.append(tmp_value)
data["value"] = old_persist_value
pd[key] = data
else:
# Remove the oldest value before appending the new one
old_persist_value.pop(0)
old_persist_value.append(tmp_value)
data["value"] = old_persist_value
pd[key] = data
except Exception as e:
# Init data structure for the key
data["value"] = [data["value"]]
pd[key] = data
pd.close()
# Return stored persistent data
def getStoredData(self, key):
pd = shelve.open(self.persist_data)
data = pd.get(key)
pd.close()
return data
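# Persistence sketch: when persistency is enabled, each OPC/UA variable key
# maps to a record in the shelve file whose "value" field is a list of recent
# readings capped by history_length (oldest dropped first); getStoredData()
# returns that record so it can be replayed over MQTT on request via the
# "getData" command handled in MqttClient.on_message.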
# TODO: Create support for more status variables -> right now the self.init flag is a limitation
# Read data from the OPC/UA server for the predefined variables
def pollData(self):
data = {}
for key, val in self.variables.items():
node = self.client.get_node(val)
data[key] = {}
data[key]["value"] = node.get_value()
data[key]["role"] = "normal"
data[key]["register_min"] = "n/a"
data[key]["register_max"] = "n/a"
data[key]["register_timestamp"] = "n/a"
# Custom configuration parameters
try:
for param_key, param_val in self.settings[key].items():
# Add settings parameters to the data structure
if param_key == "register":
config = param_val.split(",")
for config_param in config:
if config_param == "min":
# Check and init the first value
if self.registers[key]["min"] == None:
self.registers[key]["min"] = data[key]["value"]
# Add timestamp for registers
if self.registers[key]["register_timestamp"] == None:
self.registers[key]["register_timestamp"] = time.time()*1000
data[key]["register_timestamp"] = time.time()*1000
elif int(self.registers[key]["min"]) > int(data[key]["value"]):
self.registers[key]["min"] = data[key]["value"]
data[key]["register_min"] = self.registers[key]["min"]
data[key]["register_timestamp"] = self.registers[key]["register_timestamp"]
elif config_param == "max":
# Check and init the first value
if self.registers[key]["max"] == None:
self.registers[key]["max"] = data[key]["value"]
# Add timestamp for registers
if self.registers[key]["register_timestamp"] == None:
self.registers[key]["register_timestamp"] = time.time()*1000
data[key]["register_timestamp"] = time.time()*1000
elif int(self.registers[key]["max"]) < int(data[key]["value"]):
self.registers[key]["max"] = data[key]["value"]
data[key]["register_max"] = self.registers[key]["max"]
data[key]["register_timestamp"] = self.registers[key]["register_timestamp"]
else:
logging.error("Invalid option for register parameter in the configuration file")
if param_key == "state" and self.init:
# Create subscription
self.createSubscription(val)
self.init = False
if param_key == "state":
data[key]["role"] = "status"
# Key for specific configuration does not exist
except Exception as e:
pass
if self.persistency == "True":
self.storeData(data[key], key)
return data
# Create a subscription and store the connection handle
def createSubscription(self, address):
try:
handler = SubHandler()
self.subscription = self.client.create_subscription(500, handler)
handle = self.subscription.subscribe_data_change(self.client.get_node(address))
self.handlers[address] = handle
except Exception as e:
raise Exception("Unable to create subscription to OPC/UA server address", address)
logging.info("Subscription created for address " + address)
# Delete subscriptions
def unsubscribeSubscriptions(self, address=None):
if len(self.handlers) == 0:
return True
# Unsubscribe a single connection handle
if address is not None:
self.subscription.unsubscribe(self.handlers[address])
del self.handlers[address]
# Unsubscribe all connection handles
else:
for handle in self.handlers.values():
self.subscription.unsubscribe(handle)
self.handlers = {}
# Close the subscription once no handles remain
if len(self.handlers) == 0:
self.subscription.delete()
# SNMP class to communicate with IOS-XE part
class SnmpClient:
def __init__(self, gw_ip, community):
self.gw_ip = gw_ip
self.community = community
self.oid = {"latitude": "iso.3.6.1.4.1.9.9.661.1.4.1.1.1.4.4038",
"longtitude": "iso.3.6.1.4.1.9.9.661.1.4.1.1.1.5.4038",
"timestamp": "iso.3.6.1.4.1.9.9.661.1.4.1.1.1.6.4038"
}
# Get GPS coordinates from IR1101 Cellular module
def getCoordinates(self):
coordinates = {"latitude":0,"longtitude":0,"timestamp":0}
for key,val in self.oid.items():
iterator = getCmd(SnmpEngine(),
CommunityData(self.community),
UdpTransportTarget((self.gw_ip, 161)),
ContextData(),
ObjectType(ObjectIdentity(val)))
errorIndication, errorStatus, errorIndex, varBinds = next(iterator)
for varBind in varBinds:
# Reformat the timestamp value to a human-readable string
if key == "timestamp":
coordinates[key] = bytes.fromhex(varBind.prettyPrint().split("=")[1].strip()[2:]).decode("utf-8")[:-1]
else:
coordinates[key] = varBind.prettyPrint().split("=")[1].strip()[2:]
return coordinates
# Handles all activites around MQTT
class MqttClient:
def __init__(self, broker,port,topic,snmp_client):
self.broker = str(broker)
self.topic = str(topic)
self.port = int(port)
self.mqtt_client = mqtt.Client(client_id="iox-app", clean_session=False)
self.snmp_client = snmp_client
#self.mqtt_client.on_connect = self.on_connect
self.mqtt_client.on_message = self.on_message
self.control = None
# Login to the MQTT broker
def login(self):
try:
self.mqtt_client.connect(host=self.broker,port=int(self.port),keepalive=60)
self.control = Control()
except Exception as e:
raise Exception("MQTT broker is not available. Please check connectivity by cmd tools")
logging.info("MQTT client is connected to the broker" + self.broker)
# Logout from the MQTT broker
def logout(self):
self.mqtt_client.disconnect()
logging.info("MQTT client is disconnected from the broker" + self.broker)
# Process received message - commands
def on_message(self,client, data, msg):
payload_data = json.loads(str(msg.payload.decode()))
for cmd_key, cmd_val in payload_data.items():
if cmd_key == "poll":
self.control.poll_interval = cmd_val
logging.info("Received command from the server: "+cmd_key+":"+cmd_val)
elif cmd_key == "clear":
self.control.opc_client.clearRegister(cmd_val)
logging.info("Received command from the server: "+cmd_key+":"+cmd_val)
elif cmd_key == "getData":
data = self.control.opc_client.getStoredData(cmd_val)
logging.info("Received command from the server: "+cmd_key+":"+cmd_val)
self.mqtt_client.publish(self.topic+cmd_val+"/storedData",payload=str(data), qos=0, retain=False)
logging.info("Command reply sent back: ")
else:
logging.error("Unknown command from MQTT")
# Send MQTT data to the broker
def sendData(self,data):
# Add GPS
gps_data = self.snmp_client.getCoordinates()
# Prepare data records for each OPC/UA variable
for record_key, record_val in data.items():
# Add timestamp in ms
# NOTE: Maybe it is better to use time from GPS
record_val["timestamp"] = time.time()*1000
# Latitude - if the GPS has no fix, fall back to the static value -> Charles Square, Prague, CZE
if gps_data["latitude"][4] == "0":
record_val["gps_lat"] = 50.0754072
else:
record_val["gps_lat"] = gps_data["latitude"]
# Longitude - if the GPS has no fix, fall back to the static value -> Charles Square, Prague, CZE
if gps_data["longtitude"][4] == "0":
record_val["gps_long"] = 14.4165971
else:
record_val["gps_long"] = gps_data["longtitude"]
ret = self.mqtt_client.publish(self.topic+record_key,payload=str(record_val), qos=0, retain=False)
# Subscribe to MQTT to receive commands
def subscribe(self):
try:
self.mqtt_client.subscribe(self.topic+"command")
self.mqtt_client.loop_start()
except Exception as e:
raise Exception("Unable to subscribe topic",self.topic+"command")
logging.debug("MQTT topic "+self.topic+" has been subscribed")
# Class to parse configuration data
class Config:
def __init__(self,filename):
self.config = configparser.ConfigParser()
self.config.read(filename)
# Get the general section
def getGeneral(self):
try:
general = self.config["general"]
# Check that polling parses as an int
tmp = general["polling"]
int(tmp)
# Check that polling_change parses as an int
tmp = general["polling_change"]
int(tmp)
# Basic check of the MQTT broker IP address format
tmp = general["mqtt_broker"]
if len(tmp.split(".")) != 4:
raise Exception("IP address of the MQTT broker is not formatted correctly")
# Basic check of the MQTT port
tmp = general["mqtt_port"]
int(tmp)
# Basic check of the OPC server URL format
tmp = general["opc_server"]
if not tmp.startswith("opc.tcp://"):
raise Exception("OPC server address must start with 'opc.tcp://'")
import secrets
from typing import List
from asyncio import create_task
import re
from discord.ext.commands import Cog, Context
import discord
from lib.status_codes import StatusCodes as sc
from lib.pool_types import PoolType
from lib.config import logger, FAKE_GUILD_IDS
from src.auto_response import GuildAutoResponses
from src.api.util import fetch_guild
from src.api.pools import Pools
from src.api.mock_discord import MockMember, MockMessage, LogActions, MockGuild
from lib.ipc import manager_pb2 as message
from src.utils import guild_to_dict, lavasong_to_dict
url_rx = re.compile(r'https?://(?:www\.)?.+')
class Api(Cog):
def __init__(self, bot):
self.bot = bot
self.fake_messages = {}
self.pools = Pools(bot)
async def api_entry(self, method_name, *args, **kwargs):
"""Callback method for the rpc server
:param method_name: name of the method to execute
:param *args: args to pass through
:param **kwargs: kwargs to pass through
"""
try:
assert not method_name.startswith('_')
method = getattr(self, method_name)
except (AttributeError, AssertionError):
logger.warning(f"Someone tried to call '{method}' but it doesn't exist (or is private)")
return {"message": "No such method"}, sc.NOT_FOUND_404
try:
return await method(*args, **kwargs)
except Exception as e:
logger.exception(f"caught exception while handling remote request")
return {"message": f"'{e}'"}, sc.INTERNAL_SERVER_ERROR_500
async def ping(self):
return {'message': 'pong'}, sc.OK_200
async def guild_count(self):
try:
resp = await self.bot.manager_client.guild_count(message.GuildCountRequest())
return {'guild_count': resp.guild_count, 'user_count': resp.user_count}, sc.OK_200
except Exception:
logger.info(f"Shard {self.bot.shard_id} failed to get guild count from manager")
return {'guild_count': -1, 'user_count': -1}, sc.INTERNAL_SERVER_ERROR_500
async def all_guilds(self):
all_guilds = []
for g in await self.bot.manager_client.all_guilds(message.AllGuildsRequest()):
all_guilds.append({
'id': g.id,
'name': g.name,
'icon': g.icon,
'region': g.region,
'description': g.description,
'preferred_locale': g.preferred_locale,
'member_count': g.member_count,
})
return {'guilds': all_guilds}, sc.OK_200
@fetch_guild
async def set_response(self, guild, member_id, trigger, response, reply):
member = guild.get_member(int(member_id))
responses = self.bot.cogs['Auto Responses']
result = await responses.new_response(trigger, response, guild, member, reply)
return {'content': result}, sc.OK_200
@fetch_guild
async def remove_response(self, guild, member_id, trigger):
member = guild.get_member(int(member_id))
responses = self.bot.cogs['Auto Responses']
result = await responses._remove(guild, trigger, member)
return {'content': result}, sc.OK_200
async def get_playlist(self, guild_id):
voice = self.bot.lavalink.player_manager.get(guild_id)
if voice is None:
return {}, sc.OK_200
else:
dicts = [lavasong_to_dict(s) for s in voice.queue]
return {'playlist': dicts}, sc.OK_200
@fetch_guild
async def queue_song(self, guild, uid, song):
lava_cog = self.bot.cogs['Voice']
if guild is None:
return {}, sc.BAD_REQUEST_400
user = guild.get_member(uid)
if user is None:
return {}, sc.BAD_REQUEST_400
try:
await lava_cog.ensure_voice(user, guild, True)
except discord.ext.commands.CommandInvokeError:
return {}, sc.UNAUTHORIZED_401
added_songs = await lava_cog.enqueue(song, user, guild)
if added_songs == []:
return {}, sc.NOT_FOUND_404
elif added_songs[0] == 'playlist':
return {'playlist': added_songs}, sc.OK_200
else:
return {'song': lavasong_to_dict(added_songs[1])}, sc.OK_200  # response key name 'song' is assumed
async def users_guilds(self, user_id):
users_guilds = []
for guild in self.bot.guilds:
member = guild.get_member(int(user_id))
if member is not None:
settings = self.bot.settings[guild]
g = guild_to_dict(guild)
g.update({
"has_architus": True,
"architus_admin": int(user_id) in settings.admins_ids,
'permissions': member.guild_permissions.value,
})
users_guilds.append(g)
return users_guilds, sc.OK_200
async def is_member(self, user_id, guild_id):
'''check if user is a member or admin of the given guild'''
guild = self.bot.get_guild(int(guild_id))
if not guild:
return {'member': False, 'admin': False}, sc.OK_200
settings = self.bot.settings[guild]
member = guild.get_member(int(user_id))
super_admin = int(user_id) in (214037134477230080,)
return {
'member': bool(member) or super_admin,
'admin': int(user_id) in settings.admins_ids or super_admin,
'permissions': member.guild_permissions.value if member else 0,
}, sc.OK_200
async def get_permissions(self, user_id: int, guild_id: int):
guild = self.bot.get_guild(int(guild_id))
settings = self.bot.settings[guild]
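# Non-admins (or unknown guilds/settings) fall back to the restricted default
# bitmask 274; admins receive the full mask 65535 (interpretation assumed from
# context; the values are architus-internal).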
default = not guild or not settings or user_id not in settings.admin_ids
return {'permissions': 274 if default else 65535}
async def delete_response(self, user_id, guild_id, trigger):
return {'message': "No such command."}, sc.NOT_FOUND_404
async def fetch_user_dict(self, id):
usr = self.bot.get_user(int(id))
if usr is None:
return {'message': "No such user"}, sc.NOT_FOUND_404
return {
'name': usr.name,
'avatar': usr.avatar,
'discriminator': usr.discriminator
}, sc.OK_200
async def get_emoji(self, id):
e = self.bot.get_emoji(int(id))
if e is None:
return {'message': "No such emoji"}, sc.NOT_FOUND_404
return {
'name': e.name,
'url': str(e.url)
}, sc.OK_200
async def get_guild_emojis(self, guild_id):
emoji_manager = self.bot.cogs['Emoji Manager'].managers[guild_id]
return {'emojis': [{
'id': str(e.id),
'name': e.name,
'authorId': str(e.author_id) if e.author_id is not None else None,
'loaded': e.loaded,
'numUses': e.num_uses,
'discordId': str(e.discord_id),
'url': await e.url(),
} for e in emoji_manager.emojis]}, sc.OK_200
async def get_extensions(self):
return {'extensions': [k for k in self.bot.extensions.keys()]}, sc.OK_200
async def reload_extension(self, extension_name):
name = extension_name.replace('-', '.')
try:
self.bot.reload_extension(name)
except discord.ext.commands.errors.ExtensionNotLoaded as e:
logger.exception("Couldn't load extension")
return {"message": f"Extension Not Loaded: {e}"}, sc.SERVICE_UNAVAILABLE_503
return {"message": "Reload signal sent"}, sc.OK_200
@fetch_guild
async def bin_messages(self, guild, member_id):
stats_cog = self.bot.cogs["Server Statistics"]
emoji_manager = self.bot.cogs["Emoji Manager"].managers[guild.id]
data = stats_cog.cache.get(guild.id, None)
member = guild.get_member(member_id)
if data is None or member is None:
return {'message': "unknown member or guild"}, sc.NOT_FOUND_404
return {
'member_count': data.member_count,
'architus_count': data.architus_count(member),
'message_count': data.message_count(member),
'common_words': data.common_words(member),
'mention_counts': data.mention_counts(member),
'member_counts': data.member_counts(member),
'channel_counts': data.channel_counts(member),
'time_member_counts': data.times_as_strings(member),
'up_to_date': data.up_to_date,
'forbidden': data.forbidden,
'last_activity': data.last_activity(member).isoformat(),
'popular_emojis': [str(e.id) for e in emoji_manager.emojis[:10]],
}, sc.OK_200
@fetch_guild
async def get_guild_data(self, guild):
return {
'name': guild.name,
'member_count': guild.member_count,
}, sc.OK_200
@fetch_guild
async def load_max_emojis(self, guild: discord.Guild, member_id: int):
emoji_manager = self.bot.cogs['Emoji Manager'].managers[guild.id]
if member_id not in self.bot.settings[guild].admin_ids:
return {'message': "only admins may load max emoji"}, sc.UNAUTHORIZED_401
emojis = await emoji_manager.load_max_emojis()
return {'emojis': [e.as_dict() for e in emojis]}, sc.OK_200
@fetch_guild
async def load_emoji(self, guild: discord.Guild, emoji_id: int, member_id: int):
emoji_manager = self.bot.cogs['Emoji Manager'].managers[guild.id]
emoji = emoji_manager.find_emoji(a_id=emoji_id)
if emoji is None:
return {'message': "unknown emoji"}, sc.BAD_REQUEST_400
await emoji_manager.load_emoji(emoji)
return {'message': "successfully loaded"}, sc.OK_200
@fetch_guild
async def cache_emoji(self, guild: discord.Guild, emoji_id: int, member_id: int):
emoji_manager = self.bot.cogs['Emoji Manager'].managers[guild.id]
emoji = emoji_manager.find_emoji(a_id=emoji_id)
if member_id not in self.bot.settings[guild].admin_ids:
return {'message': "only admins may manually cache emoji"}, sc.UNAUTHORIZED_401
if emoji is None:
return {'message': "unknown emoji"}, sc.BAD_REQUEST_400
await emoji_manager.cache_emoji(emoji)
return {'message': "successfully cached"}, sc.OK_200
@fetch_guild
async def delete_emoji(self, guild: discord.Guild, emoji_id: int, member_id: int):
member = guild.get_member(member_id)
emoji_manager = self.bot.cogs['Emoji Manager'].managers[guild.id]
emoji = emoji_manager.find_emoji(a_id=emoji_id)
if emoji is None:
return {'message': "unknown emoji"}, sc.BAD_REQUEST_400
if emoji.author_id != member.id and member.id not in self.bot.settings[guild].admin_ids:
return {'message': "you must own this emoji or have admin permissions"}, sc.UNAUTHORIZED_401
await emoji_manager.delete_emoji(emoji)
return {'message': "successfully deleted"}, sc.OK_200
@fetch_guild
async def settings_access(self, guild, setting=None, value=None):
settings = self.bot.settings[guild]
if hasattr(settings, setting):
return {'value': getattr(settings, setting)}, sc.OK_200
return {'value': "unknown setting"}, sc.NOT_FOUND_404
@fetch_guild
async def role_setup(self, guild, channel_id, member_id, react_roles):
settings = self.bot.settings[guild]
member = guild.get_member(int(member_id))
channel = guild.get_channel(int(channel_id))
if member is None or member.id not in settings.admin_ids:
return {'content': "Admins only"}, sc.UNAUTHORIZED_401
resp = await self.bot.cogs['Roles'].setup_roles(
guild, channel, {e: guild.get_role(int(r)) for e, r in react_roles.items()})
return {'content': resp}, sc.OK_200
async def tag_autbot_guilds(self, guild_list, user_id: int):
try:
all_guilds = [guild for guild in await self.bot.manager_client.all_guilds(message.AllGuildsRequest())]
except Exception:
logger.exception(f"Shard {self.bot.shard_id} failed to get guild list from manager")
return {'guilds': []}, sc.INTERNAL_SERVER_ERROR_500
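# Mark each guild from the caller's list that architus manages; the inner
# for/else only reaches the else branch when no managed guild matched the id.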
for guild_dict in guild_list:
for guild in all_guilds:
if guild.id == int(guild_dict['id']):
guild_dict['has_architus'] = True
guild_dict['architus_admin'] = user_id in guild.admin_ids
break
else:
guild_dict.update({'has_architus': False, 'architus_admin': False})
return {'guilds': guild_list}, sc.OK_200
async def pool_request(self, user_id, guild_id, pool_type: str, entity_ids, fetch=False):
guild = self.bot.get_guild(int(guild_id)) if guild_id else None
resp = {'data': [], 'nonexistant': []}
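# One task per requested entity so the lookups run concurrently; a lookup that
# raises ends up in 'nonexistant' instead of failing the whole request.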
if pool_type == PoolType.MEMBER:
tasks = {eid: create_task(self.pools.get_member(guild, eid, fetch)) for eid in entity_ids}
elif pool_type == PoolType.USER:
tasks = {eid: create_task(self.pools.get_user(eid, fetch)) for eid in entity_ids}
elif pool_type == PoolType.EMOJI:
tasks = {eid: create_task(self.pools.get_emoji(guild, eid, fetch)) for eid in entity_ids}
elif pool_type == PoolType.GUILD:
tasks = {eid: create_task(self.pools.get_guild(user_id, eid, fetch)) for eid in entity_ids}
else:
raise Exception(f"unknown pool type: {pool_type}")
for entity_id, task in tasks.items():
try:
resp['data'].append(await task)
except Exception as e:
logger.exception(e)
resp['nonexistant'].append(entity_id)
return resp, sc.OK_200
@fetch_guild
async def pool_all_request(self, guild, pool_type: str):
if pool_type == PoolType.MEMBER:
# return {'message': "Invalid Request"}, sc.BAD_REQUEST_400
return {'data': self.pools.get_all_members(guild)}, sc.OK_200
elif pool_type == PoolType.CHANNEL:
return {'data': self.pools.get_all_channels(guild)}, sc.OK_200
elif pool_type == PoolType.ROLE:
return {'data': self.pools.get_all_roles(guild)}, sc.OK_200
elif pool_type == PoolType.USER:
return {'message': "Invalid Request"}, sc.BAD_REQUEST_400
elif pool_type == PoolType.EMOJI:
return {'data': await self.pools.get_all_emoji(guild)}, sc.OK_200
elif pool_type == PoolType.GUILD:
return {'error': "Invalid Pool"}, sc.BAD_REQUEST_400
elif pool_type == PoolType.AUTO_RESPONSE:
return {'data': self.pools.get_all_responses(guild)}, sc.OK_200
elif pool_type == PoolType.SETTING_VALUE:
pass
else:
return {'error': "Unknown Pool"}, sc.BAD_REQUEST_400
async def twitch_update(self, stream):
twitch_update = self.bot.cogs['Twitch Notification']
await twitch_update.update(stream)
return {}, sc.OK_200
async def handle_mock_user_action(
self,
action: int = None,
messageId: int = None,
guildId: int = None,
content: str = None,
allowedCommands: List[str] = (),
emoji: str = None,
silent: bool = False):
message_id = messageId
guild_id = guildId
allowed_commands = allowedCommands
# this is very scuffed. guilds under this number won't have their responses added to the db
assert guild_id < FAKE_GUILD_IDS
if action is None or
<filename>src/CACSLabeler/CACSLabelerModule/CACSLabelerModule.py
from __main__ import vtk, qt, ctk, slicer
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from slicer.ScriptedLoadableModule import ScriptedLoadableModule
import unittest
import os, sys
import SimpleITK as sitk
import sitkUtils as su
import EditorLib
import Editor
import LabelStatistics
from collections import defaultdict, OrderedDict
from EditorLib.EditUtil import EditUtil
from glob import glob
import random
import numpy as np
from SimpleITK import ConnectedComponentImageFilter
import json
import sys
import time
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from CalciumScores.Agatston import Agatston
from CalciumScores.VolumeScore import VolumeScore
from CalciumScores.DensityScore import DensityScore
from CalciumScores.NumLesions import NumLesions
from CalciumScores.LesionVolume import LesionVolume
from CalciumScores.CalciumScoreBase import CalciumScoreBase
from collections import defaultdict, OrderedDict
import imp
imp.reload(sys.modules['CalciumScores'])
import csv
dirname = os.path.dirname(os.path.abspath(__file__))
dir_src = os.path.dirname(os.path.dirname(dirname))
sys.path.append(dir_src)
from CACSTree.CACSTree import CACSTree, Lesion
from settings.settings import Settings
import shutil
import csv
from csv import reader
import io
############## CACSLabelerModule ##############
# Set parameter
#lowerThresholdValue = 130
lowerThresholdValue = 131 # Set to 131 since sitk threshold also includes boundary 131
upperThresholdValue = 10000
def splitFilePath(filepath):
""" Split filepath into folderpath, filename and file extension
:param filepath: Filepath
:type filepath: str
"""
#folderpath, _ = ntpath.split(filepath)
folderpath = os.path.dirname(filepath)
head, file_extension = os.path.splitext(filepath)
filename = os.path.basename(head)
return folderpath, filename, file_extension
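# Example: splitFilePath('/data/scan01.nrrd') returns ('/data', 'scan01', '.nrrd')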
class Image:
def __init__(self, fip_image=None, fip_ref=None, settings=None):
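# Derive PatientID / SeriesInstanceUID from the file name. The assumed naming
# convention is '<PatientID>_<SeriesInstanceUID>-label-lesion' for reference
# files and '<PatientID>_<SeriesInstanceUID>' for images; names without an
# underscore are treated as a bare SeriesInstanceUID.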
if fip_image is None and fip_ref is not None:
_,ref_name,_ = splitFilePath(fip_ref)
if len(ref_name.split('_'))==1:
if settings['MODE']=='CACS_ORCASCORE':
PatientID = ''
SeriesInstanceUID = ref_name.split('-')[0][0:-1]
print('SeriesInstanceUID123', SeriesInstanceUID)
image_name = SeriesInstanceUID
else:
PatientID = ''
SeriesInstanceUID = ref_name.split('-')[0]
image_name = SeriesInstanceUID
else:
PatientID = ref_name.split('_')[0]
SeriesInstanceUID = ref_name.split('_')[1].split('-')[0]
image_name = PatientID + '_' + SeriesInstanceUID
self.fip_ref = fip_ref
self.ref_name = ref_name
self.fip_image = ''
self.image_name = image_name
self.PatientID = PatientID
self.SeriesInstanceUID = SeriesInstanceUID
if fip_image is not None and fip_ref is None:
_,image_name,_ = splitFilePath(fip_image)
if len(image_name.split('_'))==1:
PatientID = ''
SeriesInstanceUID = image_name
else:
PatientID = image_name.split('_')[0]
SeriesInstanceUID = image_name.split('_')[1]
image_name = PatientID + '_' + SeriesInstanceUID
self.fip_ref = None
self.ref_name = image_name + '-label-lesion'
self.fip_image = ''
self.image_name = image_name
self.PatientID = PatientID
self.SeriesInstanceUID = SeriesInstanceUID
self.scores = []
self.arteries_dict = dict()
self.arteries_sum = dict()
def findImage(self, images, dataset):
if dataset=='ORCASCORE':
for image in images:
_,name,_ = splitFilePath(image)
if self.image_name == name[0:-3]:
self.fip_image = image
print('image567', image)
else:
for image in images:
_,name,_ = splitFilePath(image)
if self.image_name == name:
self.fip_image = image
def setRef_name(self, ref_name):
if len(ref_name.split('_'))==1:
PatientID = ''
SeriesInstanceUID = ref_name.split('-')[0]
image_name = SeriesInstanceUID
else:
PatientID = ref_name.split('_')[0]
SeriesInstanceUID = ref_name.split('_')[1].split('-')[0]
image_name = PatientID + '_' + SeriesInstanceUID
self.PatientID = PatientID
self.SeriesInstanceUID = SeriesInstanceUID
self.image_name = image_name
self.ref_name = ref_name
def scoreExist(self, scorename):
for s in self.scores:
if s['NAME'] == scorename:
return True
return False
def deleteScore(self, scorename):
for i,s in enumerate(self.scores):
if s['NAME'] == scorename:
del self.scores[i]
return True
return False
class CACSLabelerModule(ScriptedLoadableModule):
"""Uses ScriptedLoadableModule base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "CACSLabelerModule"
self.parent.categories = ["Examples"]
self.parent.dependencies = []
self.parent.contributors = ["<NAME>, Charite"] # replace with "Firstname Lastname (Organization)"
self.parent.helpText = """
This is an example of scripted loadable module bundled in an extension.
It performs a simple thresholding on the input volume and optionally captures a screenshot.
"""
self.parent.helpText += self.getDefaultModuleDocumentationLink()
self.parent.acknowledgementText = """
This file was originally developed by <NAME>-Robin, Kitware Inc.
and <NAME>, Isomics, Inc. and was partially funded by NIH grant 3P41RR013218-12S1.
""" # replace with organization, grant and thanks.
# CACSLabelerModuleWidget
class CACSLabelerModuleWidget:
def __init__(self, parent = None):
self.currentRegistrationInterface = None
self.changeIslandTool = None
self.editUtil = EditorLib.EditUtil.EditUtil()
self.inputImageNode = None
self.localCardiacEditorWidget = None
self.filepath_settings = None
self.settings=Settings()
self.imagelist=[]
if not parent:
self.parent = slicer.qMRMLWidget()
self.parent.setLayout(qt.QVBoxLayout())
self.parent.setMRMLScene(slicer.mrmlScene)
else:
self.parent = parent
self.layout = self.parent.layout()
if not parent:
self.setup()
self.parent.show()
# Settings filepath
currentFile = os.path.dirname(os.path.abspath(__file__))
self.filepath_settings = os.path.dirname(os.path.dirname(os.path.dirname(currentFile))) + '/data/settings_CACSLabeler.json'
def setup(self):
# Instantiate and connect widgets ...
#
# Reload and Test area
#
if True:
"""Developer interface"""
reloadCollapsibleButton = ctk.ctkCollapsibleButton()
reloadCollapsibleButton.text = "Advanced - Reload && Test"
reloadCollapsibleButton.collapsed = False
self.layout.addWidget(reloadCollapsibleButton)
reloadFormLayout = qt.QFormLayout(reloadCollapsibleButton)
# reload button
# (use this during development, but remove it when delivering
# your module to users)
self.reloadButton = qt.QPushButton("Reload")
self.reloadButton.toolTip = "Reload this module."
self.reloadButton.name = "CACSLabelerModule Reload"
reloadFormLayout.addWidget(self.reloadButton)
self.reloadButton.connect('clicked()', self.onReload)
# Collapsible button for Input Parameters
self.measuresCollapsibleButton = ctk.ctkCollapsibleButton()
self.measuresCollapsibleButton.text = "Input Parameters"
self.layout.addWidget(self.measuresCollapsibleButton)
# Collapsible button for Label Parameters
self.labelsCollapsibleButton = ctk.ctkCollapsibleButton()
self.labelsCollapsibleButton.text = "Label Parameters"
#self.layout.addWidget(self.labelsCollapsibleButton)
# Layout within the sample collapsible button
self.measuresFormLayout = qt.QFormLayout(self.measuresCollapsibleButton)
self.labelsFormLayout = qt.QFormLayout(self.labelsCollapsibleButton)
# Load input button
loadInputButton = qt.QPushButton("Load input data")
loadInputButton.toolTip = "Load data to label"
loadInputButton.setStyleSheet("background-color: rgb(230,241,255)")
loadInputButton.connect('clicked(bool)', self.onLoadInputButtonClicked)
self.loadInputButton = loadInputButton
self.measuresFormLayout.addRow(self.loadInputButton)
# The Input Volume Selector
self.inputFrame = qt.QFrame(self.measuresCollapsibleButton)
self.inputFrame.setLayout(qt.QHBoxLayout())
self.measuresFormLayout.addRow(self.inputFrame)
self.inputSelector = qt.QLabel("Input Volume: ", self.inputFrame)
self.inputFrame.layout().addWidget(self.inputSelector)
self.inputSelector = slicer.qMRMLNodeComboBox(self.inputFrame)
self.inputSelector.nodeTypes = ( ("vtkMRMLScalarVolumeNode"), "" )
self.inputSelector.addEnabled = False
self.inputSelector.removeEnabled = False
#self.inputSelector.currentNodeChanged.connect(self.onCurrentNodeChanged)
self.inputSelector.setMRMLScene( slicer.mrmlScene )
self.inputFrame.layout().addWidget(self.inputSelector)
self.RadioButtonsFrame = qt.QFrame(self.measuresCollapsibleButton)
self.RadioButtonsFrame.setLayout(qt.QHBoxLayout())
self.measuresFormLayout.addRow(self.RadioButtonsFrame)
self.KEV80 = qt.QRadioButton("80 KEV", self.RadioButtonsFrame)
self.KEV80.setToolTip("Select 80 KEV.")
self.KEV80.checked = False
self.KEV80.enabled = False
self.RadioButtonsFrame.layout().addWidget(self.KEV80)
self.KEV120 = qt.QRadioButton("120 KEV", self.RadioButtonsFrame)
self.KEV120.setToolTip("Select 120 KEV.")
self.KEV120.checked = False
self.KEV120.enabled = False
self.RadioButtonsFrame.layout().addWidget(self.KEV120)
# Threshold button
thresholdButton = qt.QPushButton("Threshold Volume")
thresholdButton.toolTip = "Threshold the selected Input Volume"
thresholdButton.setStyleSheet("background-color: rgb(230,241,255)")
self.measuresFormLayout.addRow(thresholdButton)
thresholdButton.connect('clicked(bool)', self.onThresholdButtonClicked)
# Add vertical spacer
self.layout.addStretch(1)
# Set local var as instance attribute
self.thresholdButton = thresholdButton
# sets the layout to Red Slice Only
layoutManager = slicer.app.layoutManager()
layoutManager.setLayout(slicer.vtkMRMLLayoutNode.SlicerLayoutOneUpRedSliceView)
# Save button
self.saveButton = qt.QPushButton("Save")
self.saveButton.toolTip = "Save data."
self.saveButton.setStyleSheet("background-color: rgb(230,241,255)")
self.saveButton.enabled = False
self.parent.layout().addWidget(self.saveButton)
self.saveButton.connect('clicked()', self.onSaveOutputButtonClicked)
# Delete scans button
deleteButton = qt.QPushButton("Delete data")
deleteButton.toolTip = "Delete data"
deleteButton.setStyleSheet("background-color: rgb(230,241,255)")
deleteButton.connect('clicked(bool)', self.onDeleteButtonClicked)
self.deleteButton = deleteButton
self.parent.layout().addWidget(self.deleteButton)
# Compute calcium scores
scoreButton = qt.QPushButton("Calculate Calcium Scores")
scoreButton.toolTip = "Compute Cacium scores"
scoreButton.setStyleSheet("background-color: rgb(230,241,255)")
scoreButton.enabled = False
scoreButton.connect('clicked(bool)', self.onScoreButtonClicked)
self.scoreButton = scoreButton
self.parent.layout().addWidget(self.scoreButton)
# Add scores
#self.calciumScores = [Agatston(), VolumeScore(), DensityScore(), NumLesions(), LesionVolume()]
#self.calciumScores = [Agatston()]
# Export calcium scores
exportButton = qt.QPushButton("Export Calcium Scores")
exportButton.toolTip = "Export Cacium scores"
exportButton.setStyleSheet("background-color: rgb(230,241,255)")
exportButton.enabled = False
exportButton.connect('clicked(bool)', self.onExportScoreButtonClicked)
self.exportButton = exportButton
self.parent.layout().addWidget(self.exportButton)
# Export calcium scores all refereences
exportButtonRef = qt.QPushButton("Export Calcium Scores from references folder")
exportButtonRef.toolTip = "Export Calcium Scores from references folder"
exportButtonRef.setStyleSheet("background-color: rgb(230,241,255)")
exportButtonRef.enabled = True
exportButtonRef.connect('clicked(bool)', self.onExportScoreButtonRefClicked)
self.exportButtonRef = exportButtonRef
self.parent.layout().addWidget(self.exportButtonRef)
# Read settings file
if os.path.isfile(self.filepath_settings):
#self.writeSettings(self.filepath_settings)
self.readSettings(self.filepath_settings)
else:
self.writeSettings(self.filepath_settings)
self.readSettings(self.filepath_settings)
dirname = os.path.dirname(os.path.abspath(__file__))
filepath_colorTable = dirname + '/CardiacAgatstonMeasuresLUT.ctbl'
# Create color table
if self.settings['MODE']=='CACSTREE_CUMULATIVE':
self.settings['CACSTree'].createColorTable(filepath_colorTable)
elif self.settings['MODE']=='CACS_4':
#self.settings['CACSTree'].createColorTable(filepath_colorTable)
self.settings['CACSTree'].createColorTable_CACS(filepath_colorTable)
else:
self.settings['CACSTree'].createColorTable_CACS(filepath_colorTable)
# Load color table
slicer.util.loadColorTable(filepath_colorTable)
def writeSettings(self, filepath_settings):
self.settings.writeSettings(filepath_settings)
def readSettings(self, filepath_settings):
self.settings.readSettings(filepath_settings)
def onDeleteButtonClicked(self):
""" Delete all images in slicer
"""
# Delete all old nodes
nodes=slicer.util.getNodesByClass('vtkMRMLScalarVolumeNode')
for node in nodes:
slicer.mrmlScene.RemoveNode(node)
def get_arteries_dict(self):
arteries = self.settings['CACSTree'].getLesionNames()
arteries_dict = OrderedDict()
for key in arteries:
value = self.settings['CACSTree'].getValueByName(key)
if self.settings['MODE']=="CACS_ORCASCORE":
if value>0:
arteries_dict[key] = self.settings['CACSTree'].getValueByName(key)
else:
if value>1:
arteries_dict[key] = self.settings['CACSTree'].getValueByName(key)
arteries_sum = OrderedDict()
# Reverse the order with [::-1] so that the RCA sum is calculated first and the CC sum afterwards
for key in arteries[::-1]:
children = self.settings['CACSTree'].getChildrenNamesByName(key)
value = self.settings['CACSTree'].getValueByName(key)
if not value==0 and len(children)>0:
arteries_sum[key] = self.settings['CACSTree'].getChildrenNamesByName(key)
#print('arteries_dict', arteries_dict)
#print('arteries_sum', arteries_sum)
return arteries_dict, arteries_sum
def extract_slice_step(self, inputVolumeName):
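# The slice-step CSV is assumed to contain one row per series with
# row[0]=PatientID, row[2]=SeriesInstanceUID, row[4]=slice thickness and
# row[5]=slice step (column layout inferred from the indexing below).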
filepath_slice_step = self.settings['filepath_slice_step']
with io.open(filepath_slice_step, 'r', encoding='utf-8-sig') as read_obj:
csv_reader = reader(read_obj)
for row in csv_reader:
patient = row[0]
series = row[2]
name = patient + '_' + series
if name==inputVolumeName:
print('inputVolumeName123', inputVolumeName)
try:
print('row123', row)
slice_step = int(row[5])
slice_thickness = float(row[4])
except ValueError:
print('Type of slice_step is not integer!')
slice_step=1
slice_thickness=3
#print('row', row)
#print('slice_stepXXX', slice_step)
#print('slice_thicknessXXX', slice_thickness)
return slice_step, slice_thickness
return None, None
def onScoreButtonClicked(self):
# Get image and imageLabel
inputVolumeName = self.inputImageNode.GetName()
inputVolumeNameLabel = inputVolumeName + '-label-lesion'
inputVolume = su.PullVolumeFromSlicer(inputVolumeName)
inputVolumeLabel = su.PullVolumeFromSlicer(inputVolumeNameLabel)
slice_step, slice_thickness = self.extract_slice_step(inputVolumeName)
#if slice_step is None:
# raise ValueError('Imagename is not in slice_step csv file!')
start = time.time()
# Compute calcium scores
if self.settings['MODE']=='CACSTREE_CUMULATIVE':
arteries_dict = self.get_arteries_dict()
arteries_sum = OrderedDict()
arteries_sum['RCA'] = self.settings['CACSTree'].getChildrenNamesByName('RCA')
arteries_sum['LM'] = self.settings['CACSTree'].getChildrenNamesByName('LM')
arteries_sum['LAD'] = self.settings['CACSTree'].getChildrenNamesByName('LAD')
arteries_sum['LCX'] = self.settings['CACSTree'].getChildrenNamesByName('LCX')
arteries_sum['AORTA'] = self.settings['CACSTree'].getChildrenNamesByName('AORTA')
arteries_sum['VALVES'] = self.settings['CACSTree'].getChildrenNamesByName('VALVES')
arteries_sum['BONE'] = self.settings['CACSTree'].getChildrenNamesByName('BONE')
arteries_sum['LUNG'] = self.settings['CACSTree'].getChildrenNamesByName('LUNG')
arteries_sum['CC'] = self.settings['CACSTree'].getChildrenNamesByName('CC')
arteries_sum['NCC'] = self.settings['CACSTree'].getChildrenNamesByName('NCC')
elif self.settings['MODE']=='CACS':
arteries_dict = OrderedDict()
arteries_dict['LAD'] = 2
arteries_dict['LCX'] = 3
arteries_dict['RCA'] = 4
arteries_sum = OrderedDict()
arteries_sum['CC'] = ['LAD', 'LCX', 'RCA']
self.calciumScoresResult=[]
for score in self.calciumScores:
for scorename in self.settings['CalciumScores']:
appendCSV=False
CalciumScoreBase.export_csv(self.settings, self.imagelist, appendCSV,
backwards compatibility. It is discouraged to use '
'this parameter unless you know what you are doing',
vals=validators.Anything())
self.add_parameter(
'cases',
set_cmd=self._set_cases,
get_cmd=self._get_cases,
docstring='Configures which combination of readout waveforms to actually '
'download to the instrument. As the instrument has a limited amount of memory available, it is '
'not currently possible to store all 1024 possible combinations of readout waveforms that would '
'be required to address the maximum number of qubits supported by the instrument (10). Therefore, '
'the \'cases\' mechanism is used to reduce that number to the combinations actually needed by '
'an experiment. The parameter must be set to a list of integers. The list defines the codewords '
'to be handled by the AWG program. For example, setting the parameter to [1, 5, 7] would result in '
'an AWG program that handles only codewords 1, 5 and 7. When running, if the AWG receives a codeword '
'that is not part of this list, an error will be triggered.',
vals=validators.Lists())
self.add_parameter('dio_calibration_delay',
set_cmd=self._set_dio_calibration_delay,
get_cmd=self._get_dio_calibration_delay,
unit='',
label='DIO Calibration delay',
docstring='Configures the internal delay in 300 MHz cycles (3.3 ns) '
'to be applied on the DIO interface in order to achieve reliable sampling '
'of the codewords. The valid range is 0 to 15.',
vals=validators.Ints())
self.add_parameter(
'minimum_holdoff',
get_cmd=self._get_minimum_holdoff,
unit='s',
label='Minimum hold-off',
docstring='Returns the minimum allowed hold-off between two readout operations.',
vals=validators.Numbers())
def _codeword_table_preamble(self, awg_nr) -> str:
"""
Defines a snippet of code to use in the beginning of an AWG program in order to define the waveforms.
The generated code depends on the instrument type. For the UHF-QA we simply define the raw waveforms.
"""
program = ''
# If the program doesn't need waveforms, just return here
if not self._awg_program_features['waves']:
return program
# If the program needs cases, but none are defined, flag it as an error
if self._awg_program_features['cases'] and self._cases is None:
raise zibase.ziConfigurationError(
'Missing definition of cases for AWG program!')
wf_table = self._get_waveform_table(awg_nr)
for dio_cw, (wf_l, wf_r) in enumerate(wf_table):
csvname_l = self.devname + '_' + wf_l
csvname_r = self.devname + '_' + wf_r
program += 'wave {} = "{}";\n'.format(
wf_l, csvname_l)
program += 'wave {} = "{}";\n'.format(
wf_r, csvname_r)
return program
def plot_dio_snapshot(self, bits=range(32)):
zibase.plot_timing_diagram(self.getv('awgs/0/dio/data'), bits, 64)
##########################################################################
# Overriding Qcodes InstrumentBase methods
##########################################################################
def snapshot_base(self, update: bool = False,
params_to_skip_update=None,
params_to_exclude=None):
"""
State of the instrument as a JSON-compatible dict.
Args:
update: If True, update the state by querying the
instrument. If False, just use the latest values in memory.
params_to_skip_update: List of parameter names that will be skipped
in update even if update is True. This is useful if you have
parameters that are slow to update but can be updated in a
different way (as in the qdac)
Returns:
dict: base snapshot
"""
if params_to_exclude is None:
params_to_exclude = self._params_to_exclude
snap = {
"functions": {name: func.snapshot(update=update)
for name, func in self.functions.items()},
"submodules": {name: subm.snapshot(update=update)
for name, subm in self.submodules.items()},
"__class__": full_class(self)
}
snap['parameters'] = {}
for name, param in self.parameters.items():
if params_to_exclude and name in params_to_exclude:
pass
elif params_to_skip_update and name in params_to_skip_update:
update_par = False
else:
update_par = update
try:
snap['parameters'][name] = param.snapshot(update=update_par)
except:
logging.info("Snapshot: Could not update parameter: {}".format(name))
snap['parameters'][name] = param.snapshot(update=False)
for attr in set(self._meta_attrs):
if hasattr(self, attr):
snap[attr] = getattr(self, attr)
return snap
##########################################################################
# Private methods
##########################################################################
def _reset_awg_program_features(self) -> None:
"""
Resets the self._awg_program_features to disable all features. The UHFQC can be configured with a number
of application-specific AWG programs using this driver. However, all the programs share some characteristics that
are described in the _awg_program_features dictionary. For example, all of the programs include a main loop
that runs for a number of iterations given by a user register. This feature is indicated by the 'loop_cnt'
item in the dictionary. In contrast, not all programs include an extra loop for the number of averages that
should be done. Therefore, the 'avg_cnt' item in the dictionary is not automatically set. The driver
uses these features to keep track of what the current AWG program can do. It then raises errors in case
the user tries to do something that is not supported.
"""
self._awg_program_features = {
'loop_cnt': False,
'avg_cnt': False,
'wait_dly': False,
'waves': False,
'cases': False,
'diocws': False}
def _set_dio_calibration_delay(self, value) -> None:
# Sanity check the value
if value < 0 or value > 15:
raise zibase.ziValueError(
'Trying to set DIO calibration delay to invalid value! Expected value in range 0 to 15. Got {}.'.format(
value))
log.info(f"{self.devname}: Setting DIO calibration delay to {value}")
# Store the value
self._dio_calibration_delay = value
# And configure the delays
self.setd('raw/dios/0/delay', self._dio_calibration_delay)
def _get_dio_calibration_delay(self):
return self._dio_calibration_delay
def _get_minimum_holdoff(self):
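# The minimum hold-off is the largest of a fixed pipeline latency (800 samples,
# or 2560 when hardware averaging is used), the integration length and the
# input delay + 16 samples, converted to seconds via the clock frequency.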
if self.qas_0_result_averages() == 1:
holdoff = np.max((800, self.qas_0_integration_length(), self.qas_0_delay()+16))/self.clock_freq()
else:
holdoff = np.max((2560, self.qas_0_integration_length(), self.qas_0_delay()+16))/self.clock_freq()
return holdoff
def _set_wait_dly(self, value) -> None:
self.set('awgs_0_userregs_{}'.format(UHFQC.USER_REG_WAIT_DLY), value)
def _get_wait_dly(self):
return self.get('awgs_0_userregs_{}'.format(UHFQC.USER_REG_WAIT_DLY))
def _set_cases(self, value) -> None:
# Generate error if we don't have an AWG program that supports cases
if not self._awg_program_features['cases']:
raise zibase.ziValueError(
'Trying to define cases for an AWG program that does not support them!')
# Check against number of codewords
if len(value) > self._num_codewords:
raise zibase.ziValueError('Trying to define a number of cases ({}) greater than configured number of codewords ({})!'.format(
len(value), self._num_codewords))
self._cases = value
self._cw_mask = 0
for case in self._cases:
self._cw_mask |= case
if self._awg_program_features['diocws'] and self._diocws is None:
raise zibase.ziValueError(
'AWG program defines DIO output, but no output values have been defined!')
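# Build the SeqC program: preamble, then a loop that waits for a DIO trigger,
# masks out the codeword bits and plays the waveform pair registered for that
# codeword; an unmatched codeword falls into the default case, plays a stub
# waveform and increments err_cnt (written to user register 4 at the end).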
self._awg_program[0] = \
awg_sequence_acquisition_preamble() + """
// Mask for selecting our codeword bits
const CW_MASK = (0x1ff << 17);
// Counts wrong codewords
var err_cnt = 0;
""".format(self._cw_mask)
if self._awg_program_features['diocws']:
self._awg_program[0] += \
_array2vect(self._diocws, "diocws") + """
// Loop once for each DIO codeword to output
for (cvar i = 0; i < {}; i = i + 1) {{""".format(len(self._diocws))
else:
self._awg_program[0] += """
// Loop for all measurements
repeat (loop_cnt) {"""
self._awg_program[0] += """
waitDIOTrigger();
// Get codeword and apply mask
var cw = getDIOTriggered() & CW_MASK;
// Generate waveforms based on codeword output
switch (cw) {"""
# Add each of the cases
# FIXME: note that the actual wave timing (i.e. trigger latency) depends on the number of cases, because the
# switch statement generates a tree of if's internally. Consequentially, the maximum repetition rate also depends
# on the number of cases.
for case in self._cases:
self._awg_program[0] += """
case 0x{:08x}: playWave({}, {});""".format(case << 17, zibase.gen_waveform_name(0, case), zibase.gen_waveform_name(1, case))
# Add a default for ensuring we see something when the other cases fail
self._awg_program[0] += """
default: playWave(ones(32), ones(32)); err_cnt += 1;
}
wait(wait_dly);"""
if self._awg_program_features['diocws']:
self._awg_program[0] += """
setDIO(diocws[i]);
"""
self._awg_program[0] += """
setTrigger(ro_trig);
setTrigger(ro_arm);
}
wait(300);
setTrigger(0);
setUserReg(4, err_cnt);"""
self._awg_needs_configuration[0] = True
def _get_cases(self):
return self._cases
def _get_waveform_table(self, awg_nr: int) -> list:
"""
Returns the waveform table.
The waveform table determines the mapping of waveforms to DIO codewords.
The index of the table corresponds to the DIO codeword.
The entry is a tuple of waveform names.
Example:
["wave_ch7_cw000", "wave_ch8_cw000",
"wave_ch7_cw001", "wave_ch8_cw001",
"wave_ch7_cw002", "wave_ch8_cw002"]
The waveform table generated depends on the awg_nr and the codeword
protocol.
"""
ch = awg_nr*2
wf_table = []
if self.cases() is not None:
for case in self.cases():
wf_table.append((zibase.gen_waveform_name(ch, case),
zibase.gen_waveform_name(ch+1, case)))
return wf_table
##########################################################################
##########################################################################
# Application dependent code starts here:
# - dedicated sequence programs
# - DIO support
# FIXME: move to separate class
##########################################################################
##########################################################################
##########################################################################
# 'public' functions: sequencer functions
##########################################################################
"""
Before acquisition can take place one of "awg_sequence_acquisition_and_"
has to be called. These take care that the right program is uploaded.
The variants are:
awg_sequence_acquisition
start acquisition after receiving a trigger, play no pulse
awg_sequence_acquisition_and_pulse
start acquisition after receiving a trigger,
play the specified pulse
awg_sequence_acquisition_and_pulse_SSB
start acquisition after receiving a trigger,
play an SSB pulse based on specified parameters
awg_sequence_acquisition_and_DIO_triggered_pulse
start acquisition after receiving a DIO trigger,
play the pulse specified by the received DIO codeword
cases argument specifies what codewords are
from enum import Enum
import cv2 as cv
import numpy as np
import pyautogui
pyautogui.FAILSAFE = False
pyautogui.PAUSE = 0
mouse_now = pyautogui.position()
SCREEN_SIZE = pyautogui.size()
CURSOR_SPEED_MULTIPLIER = 2
# MAX_DELTA = 20
MIN_DELTA_X = 1
MIN_DELTA_Y = 2
CURSOR_POOL = 5
KEY_WAIT_DURATION = 1
CALIBRATION_DIMENSIONS = (4, 11)
BLOB_DIST = 26
CAMERA_CALIBRATION_REQUIRED = 1
MAIN_WINDOW_NAME = 'main'
MASK_WINDOW_NAME = 'mask'
DATA_DIRECTORY = './data/'
class PluginStatus(Enum):
TRY_LOAD_CALIBRATE_CAMERA = 1
CALIBRATE_CAMERA = 2
TRY_LOAD_HOMOGRAPHY_MTX = 3
CALCULATE_HOMOGRAPHY_MTX = 4
TRY_LOAD_CALIBRATE_PEN = 5
CALIBRATE_PEN = 6
TRY_LOAD_CALIBRATE_SHADOW = 7
CALIBRATE_SHADOW = 8
READY = 9
class Plugin:
def __init__(self):
self.real_world_point_coordinate = np.zeros((CALIBRATION_DIMENSIONS[0] * CALIBRATION_DIMENSIONS[1], 3),
np.float32)
self.image_points = []
self.cursor_pool_counter = 0
self.boundary = {
'x': (0, 0),
'y': (0, 0)
}
self.camera_calibration_counter = 0
self.homography_mtx = None
self.prev_pen_pos = (-1, -1)
self.curr_frame = None
self.curr_hsv_list = []
self.status = PluginStatus.TRY_LOAD_CALIBRATE_CAMERA
self.camera_values = {}
self.pen_hsv = {'high': np.array([0, 0, 0]), 'low': np.array([180, 255, 255])}
self.shadow_hsv = {'high': np.array([0, 0, 0]), 'low': np.array([180, 255, 255])}
print(self.status)
self.estimate_real_world_coordinate()
cv.namedWindow(MAIN_WINDOW_NAME)
cv.namedWindow(MASK_WINDOW_NAME)
cv.moveWindow(MAIN_WINDOW_NAME, 0, 0)
cv.moveWindow(MASK_WINDOW_NAME, 1000, 0)
cv.setMouseCallback(MAIN_WINDOW_NAME, self.calibrate_hsv_callback)
self.sld = []
self.click = False
def estimate_real_world_coordinate(self):
try:
with np.load(DATA_DIRECTORY + 'coordinates.npz') as coordinates:
self.real_world_point_coordinate = coordinates['arr_0']
except FileNotFoundError:
for i in range(CALIBRATION_DIMENSIONS[1]):
for j in range(0, CALIBRATION_DIMENSIONS[0]):
x = i * BLOB_DIST
y = (2 * j + i % 2) * BLOB_DIST
z = 0
self.real_world_point_coordinate[i * CALIBRATION_DIMENSIONS[0] + j] = np.array([x, y, z])
np.savez(DATA_DIRECTORY + 'coordinates', self.real_world_point_coordinate)
def try_load_calibrate_camera(self):
try:
with np.load(DATA_DIRECTORY + 'camera.npz') as camera_value:
mtx, dist, new_camera_mtx, roi, image_points = [camera_value[i] for i in
('mtx', 'dist', 'new_camera_mtx', 'roi', 'img_pts')]
self.image_points = np.array(image_points).reshape(-1, 2)
boundary_min, boundary_max = np.amin(self.image_points, axis=0), np.amax(self.image_points, axis=0)
self.boundary = {
'x': (int(boundary_min[0]), int(boundary_max[0])),
'y': (int(boundary_min[1]), int(boundary_max[1]))
}
self.camera_values = {
'mtx': mtx,
'dist': dist,
'new_camera_mtx': new_camera_mtx,
'roi': roi
}
self.status = PluginStatus(self.status.value + 2)
print(self.status)
except FileNotFoundError:
self.status = PluginStatus(self.status.value + 1)
print(self.status)
def calibrate_camera(self, frame):
if self.camera_calibration_counter >= CAMERA_CALIBRATION_REQUIRED:
self.camera_calibration_counter = 0
object_points = np.array([self.real_world_point_coordinate] * CAMERA_CALIBRATION_REQUIRED)
ret, mtx, dist, r_vec, t_vec = cv.calibrateCamera(object_points, self.image_points,
self.curr_frame.shape[1::-1], None, None)
h, w = frame.shape[:2]
print(h, w)
new_camera_mtx, roi = cv.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
np.savez(DATA_DIRECTORY + 'camera',
mtx=mtx,
new_camera_mtx=new_camera_mtx,
roi=roi,
dist=dist,
img_pts=self.image_points)
self.camera_values = {
'mtx': mtx,
'dist': dist,
'new_camera_mtx': new_camera_mtx,
'roi': roi
}
self.image_points = np.array(self.image_points).reshape(-1, 2)
boundary_min, boundary_max = np.amin(self.image_points, axis=0), np.amax(self.image_points, axis=0)
self.boundary = {
'x': (int(boundary_min[0]), int(boundary_max[0])),
'y': (int(boundary_min[1]), int(boundary_max[1]))
}
print(self.boundary)
self.status = PluginStatus(self.status.value + 1)
print(self.status)
return
self.curr_frame = frame.copy()
gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
ret, corners = cv.findCirclesGrid(gray, CALIBRATION_DIMENSIONS,
flags=cv.CALIB_CB_ASYMMETRIC_GRID + cv.CALIB_CB_CLUSTERING)
frame = cv.drawChessboardCorners(frame, CALIBRATION_DIMENSIONS, corners, ret)
cv.imshow(MAIN_WINDOW_NAME, frame)
key = cv.waitKey(KEY_WAIT_DURATION)
if key == ord('s') and ret:
self.camera_calibration_counter += 1
self.image_points.append(corners)
print(
f'current camera calibration counter: {self.camera_calibration_counter}/{CAMERA_CALIBRATION_REQUIRED}')
def try_load_homography_mtx(self):
try:
with np.load(DATA_DIRECTORY + 'homography.npz') as homography_mtx:
self.homography_mtx = homography_mtx['arr_0']
self.status = PluginStatus(self.status.value + 2)
print(self.status)
except FileNotFoundError:
self.status = PluginStatus(self.status.value + 1)
print(self.status)
def calculate_homography_mtx(self):
image_points = np.array(self.image_points).reshape((-1, 2))
object_points = np.array([self.real_world_point_coordinate] * CAMERA_CALIBRATION_REQUIRED)
object_points = object_points.reshape((-1, 3))
self.homography_mtx, status = cv.findHomography(image_points, object_points)
np.savez(DATA_DIRECTORY + 'homography', self.homography_mtx)
self.status = PluginStatus(self.status.value + 1)
print(self.status)
def calibrate_hsv_callback(self, event, x, y, flags, user_data):
if event != cv.EVENT_LBUTTONDOWN:
return
hsv_image = cv.cvtColor(self.curr_frame, cv.COLOR_BGR2HSV)
hsv_point = hsv_image[y, x]
self.curr_hsv_list += [hsv_point * 0.8, hsv_point * 1.2]
def try_load_calibrate_hsv(self):
try:
if self.status == PluginStatus.TRY_LOAD_CALIBRATE_PEN:
with np.load(DATA_DIRECTORY + 'pen_hsv.npz') as temp_hsv:
self.pen_hsv['high'] = temp_hsv['high']
self.pen_hsv['low'] = temp_hsv['low']
else:
with np.load(DATA_DIRECTORY + 'shadow_hsv.npz') as temp_hsv:
self.shadow_hsv['high'] = temp_hsv['high']
self.shadow_hsv['low'] = temp_hsv['low']
self.status = PluginStatus(self.status.value + 2)
print(self.status)
except FileNotFoundError:
self.status = PluginStatus(self.status.value + 1)
print(self.status)
def calibrate_hsv(self, frame):
self.curr_frame = frame
hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
if self.curr_hsv_list:
mask = cv.inRange(hsv, np.amin(self.curr_hsv_list, axis=0), np.amax(self.curr_hsv_list, axis=0))
cv.imshow(MASK_WINDOW_NAME, mask)
cv.imshow(MAIN_WINDOW_NAME, frame)
key = cv.waitKey(KEY_WAIT_DURATION)
if key == ord('s'):
# save hsv
low_hsv = np.amin(self.curr_hsv_list, axis=0)
high_hsv = np.amax(self.curr_hsv_list, axis=0)
name = 'pen_hsv' if self.status == PluginStatus.CALIBRATE_PEN else 'shadow_hsv'
np.savez(DATA_DIRECTORY + name, low=low_hsv, high=high_hsv)
if self.status == PluginStatus.CALIBRATE_PEN:
self.pen_hsv['low'] = low_hsv
self.pen_hsv['high'] = high_hsv
else:
self.shadow_hsv['low'] = low_hsv
self.shadow_hsv['high'] = high_hsv
self.curr_hsv_list = []
self.status = PluginStatus(self.status.value + 1)
print(self.status)
if key == ord('r'):
# reset hsv
self.curr_hsv_list = []
def track(self, frame):
kernel = np.ones((5, 5), np.uint8)
# # undistorted
# frame = cv.undistort(frame, self.camera_values['mtx'], self.camera_values['dist'], None,
# self.camera_values['new_camera_mtx'])
# # crop the image
# x, y, w, h = self.camera_values['roi']
# frame = frame[y:y + h, x:x + w]
# frame = cv.flip(frame, 1)
hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
pen_mask = cv.inRange(hsv, self.pen_hsv['low'], self.pen_hsv['high'])
pen_mask = cv.erode(pen_mask, kernel, iterations=1)
pen_mask = cv.dilate(pen_mask, kernel, iterations=2)
pen_contours, _ = cv.findContours(pen_mask, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
if not pen_contours:
self.prev_pen_pos = (-1, -1)
cv.imshow(MAIN_WINDOW_NAME, frame)
cv.waitKey(KEY_WAIT_DURATION)
return
pen_maks_contour = max(pen_contours, key=cv.contourArea)
# pen_contour_area = cv.contourArea(pen_maks_contour)
# find biggest y point (lowest)
# hull = cv.convexHull(pen_maks_contour)
epsilon = 0.1 * cv.arcLength(pen_maks_contour, True)
approx = cv.approxPolyDP(pen_maks_contour, epsilon, True)
[[_, idx]] = np.argmax(approx, axis=0)
pen_x, pen_y = approx[idx][0]
# if self.prev_pen_pos == (-1, -1):
# self.prev_pen_pos = (pen_x, pen_y)
# if self.prev_pen_pos == (-1, -1):
# self.prev_pen_pos = (screen_x, screen_y)
# return
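# Look for the pen's shadow only inside a small window anchored at the detected
# pen tip; the vertical gap between pen tip and shadow tip is later used as a
# heuristic for whether the pen touches the surface.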
window_w = 120
window_h = 60
window_top_right_x = pen_x + window_w // 3
window_top_right_y = pen_y - window_h // 3
# window_top_right_x = self.prev_pen_pos[0] + window_w // 3
# window_top_right_y = self.prev_pen_pos[1] - window_h // 3
window = hsv[window_top_right_y:window_top_right_y + window_h, window_top_right_x - window_w:window_top_right_x]
cv.line(frame, (window_top_right_x, window_top_right_y), (window_top_right_x, window_top_right_y + window_h),
[255, 255, 255])
cv.line(frame, (window_top_right_x, window_top_right_y), (window_top_right_x - window_w, window_top_right_y),
[255, 255, 255])
cv.line(frame, (window_top_right_x - window_w, window_top_right_y + window_h),
(window_top_right_x - window_w, window_top_right_y), [255, 255, 255])
cv.line(frame, (window_top_right_x - window_w, window_top_right_y + window_h),
(window_top_right_x, window_top_right_y + window_h), [255, 255, 255])
shadow_threshold = window_w * window_h * 255 // 50
if window.size:
# only find shadow around pen
shadow_mask = cv.inRange(window, self.shadow_hsv['low'], self.shadow_hsv['high'])
cv.imshow(MASK_WINDOW_NAME, shadow_mask)
# shadow_frame = cv.bitwise_and(frame, frame, mask=shadow_mask)
shadow_contours, _ = cv.findContours(shadow_mask, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
# cv.putText(frame, f'{shadow_mask.sum() / (window_h * window_w * 255):.2f}', (pen_x + 10, pen_y + 30),
# cv.FONT_HERSHEY_SIMPLEX, 1, [0, 255, 0])
if shadow_contours and shadow_mask.sum() >= shadow_threshold:
shadow_maks_contour = max(shadow_contours, key=cv.contourArea)
# shadow_contour_area = cv.contourArea(shadow_maks_contour)
# shadow_y, shadow_x = frame.shape[:2]
[[curr_idx, _]] = np.argmax(shadow_maks_contour, axis=0)
shadow_x, shadow_y = shadow_maks_contour[curr_idx][0]
# shadow_x, shadow_y = 0, 0
#
# for contour in shadow_contours:
# [[curr_idx, _]] = np.argmax(contour, axis=0)
# curr_x, curr_y = contour[curr_idx][0]
# if curr_x > shadow_x:
# shadow_x = curr_x
# shadow_y = curr_y
estimated_contact_pos_x, estimated_contact_pos_y = pen_x, shadow_y
shadow_x += window_top_right_x - window_w
shadow_y += window_top_right_y
# shadow_dist = dist((pen_x, pen_y), (estimated_contact_pos_x, estimated_contact_pos_y))
shadow_dist = shadow_y - pen_y
# shadow_dist = np.std(shadow_mask) / np.mean(shadow_mask) > 2 and dist((pen_x, pen_y),
# (shadow_x, shadow_y)) < 10
# if self.sld:
# cv.putText(frame, f'{np.mean(self.sld):.2f}, {np.argmax(self.sld):.2f}', (shadow_x, shadow_y + 30),
# cv.FONT_HERSHEY_SIMPLEX, 1,
# [0, 255, 0])
cv.circle(frame, (shadow_x, shadow_y), 4, [0, 0, 255], 4)
cv.circle(frame, (pen_x, shadow_y), 4, [255, 255, 255], 4)
self.move_mouse(pen_x, pen_y, shadow_x, shadow_y)
cv.putText(frame, f'{pen_x}, {pen_y}', (pen_x, pen_y - 30), cv.FONT_HERSHEY_SIMPLEX, 1,
[0, 255, 0])
else:
self.prev_pen_pos = (-1, -1)
else:
self.prev_pen_pos = (-1, -1)
# [[x1,y1,w]] = (homo @ np.array([[x,y,1]]).T).T
cv.circle(frame, (pen_x, pen_y), 4, [0, 0, 255], 4)
# cv.circle(frame, (shadow_x, shadow_y), int(shadow_dist), [0, 0, 255], 4)
cv.drawContours(frame, approx, -1, [0, 255, 0], 4)
# cv.putText(frame, f'{screen_x}, {screen_y}', (pen_x, pen_y - 30), cv.FONT_HERSHEY_SIMPLEX, 1,
# [0, 255, 0])
# scale_percent = 300 # percent of original size
# width = int(crop.shape[1] * scale_percent / 100)
# height = int(crop.shape[0] * scale_percent / 100)
# dim = (width, height)
# crop_resized = cv.resize(crop, dim)
# if crop.size:
# gray = cv.cvtColor(crop, cv.COLOR_BGR2GRAY)
# m = np.mean(gray)
# s = np.std(gray)
# # print(s/m)
# if s/m > 1.5:
# if not click:
# click = True
# counter += 1
# print('click', counter)
# else:
# click = False
#
# cv.imshow('frame', crop)
cv.imshow(MAIN_WINDOW_NAME, frame)
cv.waitKey(KEY_WAIT_DURATION)
def move_mouse(self, pen_x, pen_y, shadow_x, shadow_y):
self.sld.append([pen_x, pen_y, shadow_x, shadow_y])
if len(self.sld) > CURSOR_POOL:
self.sld.pop(0)
self.cursor_pool_counter += 1
self.cursor_pool_counter %= CURSOR_POOL
pen_x, pen_y, shadow_x, shadow_y = np.mean(self.sld, axis=0)
# shadow_dist = (shadow_y if shadow_x > pen_x else pen_y) - pen_y
shadow_dist = shadow_y - pen_y
if shadow_dist <= 5:
# world_x, world_y, | |
# This file is part of the pyMOR project (https://www.pymor.org).
# Copyright 2013-2021 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)
import numpy as np
from pymor.core.cache import CacheableObject
from pymor.operators.constructions import induced_norm
from pymor.parameters.base import ParametricObject, Mu
from pymor.tools.frozendict import FrozenDict
class Model(CacheableObject, ParametricObject):
"""Interface for model objects.
A model object defines a discrete problem
via its `class` and the |Operators| it contains.
Furthermore, models can be
:meth:`solved <Model.solve>` for given
|parameter values| resulting in a solution |VectorArray|.
Attributes
----------
solution_space
|VectorSpace| of the solution |VectorArrays| returned by :meth:`solve`.
dim_output
Dimension of the model output returned by :meth:`output`. 0 if the
model has no output.
linear
`True` if the model describes a linear problem.
products
Dict of inner product operators associated with the model.
"""
solution_space = None
dim_output = 0
linear = False
products = FrozenDict()
def __init__(self, products=None, error_estimator=None, visualizer=None,
name=None):
products = FrozenDict(products or {})
if products:
for k, v in products.items():
setattr(self, f'{k}_product', v)
setattr(self, f'{k}_norm', induced_norm(v))
self.__auto_init(locals())
def _compute(self, solution=False, output=False, solution_d_mu=False, output_d_mu=False,
solution_error_estimate=False, output_error_estimate=False,
output_d_mu_return_array=False, mu=None, **kwargs):
return {}
def _compute_solution(self, mu=None, **kwargs):
"""Compute the model's solution for |parameter values| `mu`.
This method is called by the default implementation of :meth:`compute`
in :class:`pymor.models.interface.Model`.
Parameters
----------
mu
|Parameter values| for which to compute the solution.
kwargs
Additional keyword arguments to customize how the solution is
computed or to select additional data to be returned.
Returns
-------
|VectorArray| with the computed solution or a dict which at least
must contain the key `'solution'`.
"""
raise NotImplementedError
def _compute_output(self, solution, mu=None, **kwargs):
"""Compute the model's output for |parameter values| `mu`.
This method is called by the default implementation of :meth:`compute`
in :class:`pymor.models.interface.Model`. The assumption is made
that the output is a derived quantity from the model's internal state
as returned by :meth:`_compute_solution`. When this is not the case,
the computation of the output should be implemented in :meth:`_compute`.
.. note::
The default implementation applies the |Operator| given by the
:attr:`output_functional` attribute to the given `solution`
|VectorArray|.
Parameters
----------
solution
Internal model state for the given |parameter values|.
mu
|Parameter values| for which to compute the output.
kwargs
Additional keyword arguments to customize how the output is
computed or to select additional data to be returned.
Returns
-------
|NumPy array| with the computed output or a dict which at least
must contain the key `'output'`.
"""
if not getattr(self, 'output_functional', None):
return np.zeros((len(solution), 0))
else:
return self.output_functional.apply(solution, mu=mu).to_numpy()
def _compute_solution_d_mu_single_direction(self, parameter, index, solution, mu=None, **kwargs):
"""Compute the partial derivative of the solution w.r.t. a parameter index
Parameters
----------
parameter
parameter for which to compute the sensitivity
index
parameter index for which to compute the sensitivity
solution
Internal model state for the given |Parameter value|.
mu
|Parameter value| for which to solve
Returns
-------
The sensitivity of the solution as a |VectorArray|.
"""
raise NotImplementedError
def _compute_solution_d_mu(self, solution, mu=None, **kwargs):
"""Compute all partial derivative of the solution w.r.t. a parameter index
Parameters
----------
solution
Internal model state for the given |Parameter value|.
mu
|Parameter value| for which to solve
Returns
-------
A dict of all partial sensitivities of the solution.
"""
sensitivities = {}
for (parameter, size) in self.parameters.items():
sens_for_param = self.solution_space.empty()
for l in range(size):
sens_for_param.append(self._compute_solution_d_mu_single_direction(
parameter, l, solution, mu))
sensitivities[parameter] = sens_for_param
return sensitivities
def _compute_output_d_mu(self, solution, mu=None, return_array=False, **kwargs):
"""Compute the gradient w.r.t. the parameter of the output functional
Parameters
----------
solution
Internal model state for the given |Parameter value|.
mu
|Parameter value| for which to compute the gradient
return_array
if `True`, return the output gradient as a |NumPy array|.
Otherwise, return a dict of gradients for each |Parameter|.
Returns
-------
The gradient as a |NumPy array| or a dict of |NumPy arrays|.
"""
assert self.output_functional is not None
U_d_mus = self._compute_solution_d_mu(solution, mu)
gradients = [] if return_array else {}
for (parameter, size) in self.parameters.items():
array = np.empty(shape=(size, self.output_functional.range.dim))
for index in range(size):
output_partial_dmu = self.output_functional.d_mu(parameter, index).apply(
solution, mu=mu).to_numpy()[0]
U_d_mu = U_d_mus[parameter][index]
array[index] = output_partial_dmu + self.output_functional.jacobian(
solution, mu).apply(U_d_mu, mu).to_numpy()[0]
if return_array:
gradients.extend(array)
else:
gradients[parameter] = array
if return_array:
return np.array(gradients)
else:
return gradients
def _compute_solution_error_estimate(self, solution, mu=None, **kwargs):
"""Compute an error estimate for the computed internal state.
This method is called by the default implementation of :meth:`compute`
in :class:`pymor.models.interface.Model`. The assumption is made
that the error estimate is a derived quantity from the model's internal state
as returned by :meth:`_compute_solution`. When this is not the case,
the computation of the error estimate should be implemented in :meth:`_compute`.
.. note::
The default implementation calls the `estimate_error` method of the object
given by the :attr:`error_estimator` attribute, passing `solution`,
`mu`, `self` and `**kwargs`.
Parameters
----------
solution
Internal model state for the given |parameter values|.
mu
|Parameter values| for which to compute the error estimate.
kwargs
Additional keyword arguments to customize how the error estimate is
computed or to select additional data to be returned.
Returns
-------
The computed error estimate or a dict which at least must contain the key
`'solution_error_estimate'`.
"""
if self.error_estimator is None:
raise ValueError('Model has no error estimator')
return self.error_estimator.estimate_error(solution, mu, self, **kwargs)
def _compute_output_error_estimate(self, solution, mu=None, **kwargs):
"""Compute an error estimate for the computed model output.
This method is called by the default implementation of :meth:`compute`
in :class:`pymor.models.interface.Model`. The assumption is made
that the error estimate is a derived quantity from the model's internal state
as returned by :meth:`_compute_solution`. When this is not the case,
the computation of the error estimate should be implemented in :meth:`_compute`.
.. note::
The default implementation calls the `estimate_output_error` method of the object
given by the :attr:`error_estimator` attribute, passing `solution`,
`mu`, `self` and `**kwargs`.
Parameters
----------
solution
Internal model state for the given |parameter values|.
mu
|Parameter values| for which to compute the error estimate.
kwargs
Additional keyword arguments to customize how the error estimate is
computed or to select additional data to be returned.
Returns
-------
The computed error estimate or a dict which at least must contain the key
`'output_error_estimate'`.
"""
if self.error_estimator is None:
raise ValueError('Model has no error estimator')
return self.error_estimator.estimate_output_error(solution, mu, self, **kwargs)
_compute_allowed_kwargs = frozenset()
def compute(self, solution=False, output=False, solution_d_mu=False, output_d_mu=False,
solution_error_estimate=False, output_error_estimate=False,
output_d_mu_return_array=False, *, mu=None, **kwargs):
"""Compute the solution of the model and associated quantities.
        This method computes the output of the model, its internal state
        and various associated quantities for the given |parameter values|
`mu`.
.. note::
The default implementation defers the actual computations to
the methods :meth:`_compute_solution`, :meth:`_compute_output`,
:meth:`_compute_solution_error_estimate` and :meth:`_compute_output_error_estimate`.
The call to :meth:`_compute_solution` is :mod:`cached <pymor.core.cache>`.
In addition, |Model| implementors may implement :meth:`_compute` to
simultaneously compute multiple values in an optimized way. The corresponding
`_compute_XXX` methods will not be called for values already returned by
:meth:`_compute`.
Parameters
----------
solution
If `True`, return the model's internal state.
output
If `True`, return the model output.
solution_d_mu
If not `False`, either `True` to return the derivative of the model's
internal state w.r.t. all parameter components or a tuple `(parameter, index)`
to return the derivative of a single parameter component.
output_d_mu
If `True`, return the gradient of the model output w.r.t. the |Parameter|.
solution_error_estimate
If `True`, return an error estimate for the computed internal state.
output_error_estimate
If `True`, return an error estimate for the computed output.
output_d_mu_return_array
            If `True`, return the output gradient as a |NumPy array|.
Otherwise, return a dict of gradients for each |Parameter|.
mu
|Parameter values| for which to compute the values.
kwargs
            Further keyword arguments to select further quantities that should
be returned or to customize how the values are computed.
Returns
-------
A dict with the computed values.
"""
# make sure no unknown kwargs are passed
assert kwargs.keys() <= self._compute_allowed_kwargs
# parse parameter values
if not isinstance(mu, Mu):
mu = self.parameters.parse(mu)
assert self.parameters.assert_compatible(mu)
# log output
# explicitly checking if logging is disabled saves some cpu cycles
if not self.logging_disabled:
self.logger.info(f'Solving {self.name} for {mu} ...')
        # first call
##############################################################################
##
# This file is part of Sardana
##
# http://www.tango-controls.org/static/sardana/latest/doc/html/axisex.html
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Sardana is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Sardana is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Sardana. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
import re
import sys
import time
import copy
import numpy
try:
import h5py
except ImportError:
pass
from sardana import State
from sardana.pool import AcqSynch
from sardana.pool.controller import TwoDController, Referable, \
Type, Description, MaxDimSize, FGet, FSet, DefaultValue
def gauss(x, mean, ymax, fwhm, yoffset=0):
return yoffset + ymax * numpy.power(2, -4 * ((x - mean) / fwhm)**2)
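# Worked example: gauss(0, 0, 10, 4) == 10.0 (the peak) and gauss(2, 0, 10, 4) == 5.0,
# i.e. the value drops to half of ymax at a distance of fwhm/2 from the mean.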
def generate_img(x_size, y_size, amplitude):
x = numpy.linspace(-10, 10, x_size)
y = numpy.linspace(-10, 10, y_size)
x, y = numpy.meshgrid(x, y)
img = (gauss(x, 0, amplitude, 4) * gauss(y, 0, amplitude, 4))
return img
def generate_ref(pattern, idx):
if pattern is None or pattern == "":
pattern = "h5file:///tmp/dummy2d_default_{index}.h5"
msg = None
try:
uri = pattern.format(index=idx)
except Exception:
uri = pattern
msg = ("Not able to format value reference template "
"with index. Trying to use directly the template...")
match_res = re.match(
r"(?P<scheme>h5file)://(?P<path>\S+)::(?P<dataset>\S+)",
uri)
if match_res is None:
match_res = re.match(
r"(?P<scheme>(h5file|file))://(?P<path>\S+)",
uri)
if match_res is None:
raise Exception("invalid value reference template")
scheme = match_res.group("scheme")
path = match_res.group("path")
try:
dataset_name = match_res.group("dataset")
except IndexError:
if scheme == "h5file":
dataset_name = "dataset"
else:
dataset_name = None
return scheme, path, dataset_name, msg
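# Illustrative parse (hypothetical templates): generate_ref("h5file:///tmp/img_{index}.h5::data", 3)
# yields ("h5file", "/tmp/img_3.h5", "data", None), while a plain
# "file:///tmp/img_{index}.dat" template yields dataset_name None, because only the
# second, dataset-less pattern matches and the scheme is not "h5file".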
def save_img(img, path, dataset_name):
msg = None
if "h5py" not in sys.modules:
msg = "Not able to store h5 file (h5py is not available)"
try:
h5f = h5py.File(path, "w")
h5f.create_dataset(dataset_name, data=img)
except Exception:
msg = "Not able to store h5 file."
return msg
class Channel:
def __init__(self, idx):
self.idx = idx # 1 based index
self.value = []
self.value_ref = None
self.is_counting = False
self.acq_idx = 0
self.buffer_values = []
self.buffer_value_refs = []
self.amplitude = BaseValue('1.0')
self.saving_enabled = False
self.value_ref_pattern = "h5file:///tmp/dummy2d_default_{index}.h5"
self.value_ref_enabled = False
self.roi = [0, 0, 0, 0]
class BaseValue(object):
def __init__(self, value):
self.raw_value = value
self.init()
def init(self):
self.value = float(self.raw_value)
def get(self):
return self.value
def get_value_name(self):
return self.raw_value
class TangoValue(BaseValue):
def init(self):
import PyTango
self.attr_proxy = PyTango.AttributeProxy(self.raw_value)
def get(self):
return self.attr_proxy.read().value
class BasicDummyTwoDController(TwoDController):
"""This class represents a basic, dummy Sardana TwoD controller."""
gender = "Simulation"
model = "Basic"
organization = "Sardana team"
MaxDevice = 1024
BufferSize = 1024, 1024
default_latency_time = 0.0
ctrl_attributes = {
"Synchronizer": {
Type: str,
Description: ("Hardware (external) emulated synchronizer. "
"Can be any of dummy trigger/gate elements "
"from the same pool.")
},
}
axis_attributes = {
'Amplitude': {
Type: str,
FGet: 'getAmplitude',
FSet: 'setAmplitude',
Description: ("Amplitude. Maybe a number or a tango attribute "
"(must start with tango://)"),
DefaultValue: '1.0'
},
'RoI': {
Type: (int,),
FGet: 'getRoI',
FSet: 'setRoI',
Description: ("Region of Interest of image "
"(begin_x, end_x, begin_y, end_y)"),
DefaultValue: [0, 0, 0, 0]
}
}
def __init__(self, inst, props, *args, **kwargs):
TwoDController.__init__(self, inst, props, *args, **kwargs)
self.channels = self.MaxDevice * [None, ]
self.start_time = None
self.integ_time = None
self.repetitions = None
self.latency_time = None
self.acq_cycle_time = None # integ_time + latency_time
self.estimated_duration = None
self.start_idx = None
self._synchronization = AcqSynch.SoftwareTrigger
self.read_channels = {}
self.counting_channels = {}
# name of synchronizer element
self._synchronizer = None
# synchronizer element (core)
self.__synchronizer_obj = None
# flag whether the controller was armed for hardware synchronization
self._armed = False
def GetAxisAttributes(self, axis):
# the default max shape for 'value' is (16*1024,).
# We don't need so much so we set it to BufferSize
attrs = super(BasicDummyTwoDController, self).GetAxisAttributes(axis)
attrs['Value'][MaxDimSize] = self.BufferSize
return attrs
def AddDevice(self, axis):
idx = axis - 1
self.channels[idx] = channel = Channel(axis)
channel.value = numpy.zeros(self.BufferSize, dtype=numpy.float64)
def DeleteDevice(self, axis):
idx = axis - 1
self.channels[idx] = None
def PrepareOne(self, axis, value, repetitions, latency, nb_starts):
self.start_idx = -1
def LoadOne(self, axis, integ_time, repetitions, latency_time):
self.integ_time = integ_time
self.repetitions = repetitions
self.latency_time = latency_time
self.acq_cycle_time = acq_cycle_time = integ_time + latency_time
self.estimated_duration = acq_cycle_time * repetitions - latency_time
def PreStartAll(self):
self.counting_channels = {}
self.read_channels = {}
self.start_idx += 1
def PreStartOne(self, axis, value):
idx = axis - 1
channel = self.channels[idx]
channel.value = None
channel.acq_idx = 0
channel.buffer_values = []
self.counting_channels[axis] = channel
self.read_channels[axis] = channel
return True
def StartOne(self, axis, value):
if self._synchronization in (AcqSynch.SoftwareStart,
AcqSynch.SoftwareTrigger):
self.counting_channels[axis].is_counting = True
def StartAll(self):
if self._synchronization in (AcqSynch.HardwareStart,
AcqSynch.HardwareTrigger,
AcqSynch.HardwareGate):
self._connect_hardware_synchronization()
self._armed = True
else:
self.start_time = time.time()
def _updateChannelState(self, axis, elapsed_time):
if self._synchronization == AcqSynch.SoftwareTrigger:
if self.integ_time is not None:
# counting in time
if elapsed_time >= self.integ_time:
self._finish(elapsed_time)
elif self._synchronization in (AcqSynch.HardwareTrigger,
AcqSynch.HardwareGate,
AcqSynch.HardwareStart,
AcqSynch.SoftwareStart):
if self.integ_time is not None:
# counting in time
if elapsed_time > self.estimated_duration:
self._finish(elapsed_time)
def StateOne(self, axis):
idx = axis - 1
sta = State.On
status = "Stopped"
if self._armed:
sta = State.Moving
status = "Armed"
elif axis in self.counting_channels and self.start_time is not None:
channel = self.channels[idx]
now = time.time()
elapsed_time = now - self.start_time
self._updateChannelState(axis, elapsed_time)
if channel.is_counting:
sta = State.Moving
status = "Acquiring"
# TODO: do it only once at the end
self._updateChannelValue(axis, elapsed_time)
return sta, status
def _updateChannelValue(self, axis, elapsed_time):
channel = self.channels[axis - 1]
if channel.acq_idx == self.repetitions:
return
x_size = self.BufferSize[0]
y_size = self.BufferSize[1]
amplitude = axis * self.integ_time * channel.amplitude.get()
img = generate_img(x_size, y_size, amplitude)
roi = channel.roi
if roi != [0, 0, 0, 0]:
img = img[roi[0]:roi[1], roi[2]:roi[3]]
if self._synchronization == AcqSynch.SoftwareTrigger:
channel.value = img
channel.acq_idx += 1
elif self._synchronization in (AcqSynch.HardwareTrigger,
AcqSynch.HardwareGate,
AcqSynch.HardwareStart,
AcqSynch.SoftwareStart):
acq_cycle_time = self.acq_cycle_time
nb_elapsed_acq, resting = divmod(elapsed_time, acq_cycle_time)
nb_elapsed_acq = int(nb_elapsed_acq)
# do not wait the last latency_time
if (nb_elapsed_acq == self.repetitions - 1
and resting > self.integ_time):
nb_elapsed_acq += 1
if nb_elapsed_acq > self.repetitions:
nb_elapsed_acq = self.repetitions
nb_new_acq = nb_elapsed_acq - channel.acq_idx
if nb_new_acq == 0:
return
channel.buffer_values.extend([img] * nb_new_acq)
channel.acq_idx += nb_new_acq
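    # Worked example (illustrative values): with integ_time=0.1, latency_time=0.02 and
    # repetitions=10, acq_cycle_time is 0.12; at elapsed_time=0.5, divmod gives
    # nb_elapsed_acq=4, so 4 - channel.acq_idx freshly generated images are appended
    # to channel.buffer_values.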
def ReadOne(self, axis):
self._log.debug('ReadOne(%d): entering...' % axis)
try:
channel = self.read_channels[axis]
except KeyError:
# TODO: After SEP17 this won't be necessary anymore.
msg = "no acquisition done on axis {0} so far".format(axis)
raise RuntimeError(msg)
ret = None
if self._synchronization in (AcqSynch.HardwareTrigger,
AcqSynch.HardwareGate,
AcqSynch.HardwareStart,
AcqSynch.SoftwareStart):
values = copy.deepcopy(channel.buffer_values)
channel.buffer_values.__init__()
ret = values
elif self._synchronization == AcqSynch.SoftwareTrigger:
ret = channel.value
self._log.debug('ReadOne(%d): returning %s' % (axis, repr(ret)))
return ret
def _finish(self, elapsed_time, axis=None):
if axis is None:
for axis, channel in list(self.counting_channels.items()):
channel.is_counting = False
self._updateChannelValue(axis, elapsed_time)
elif axis in self.counting_channels:
channel = self.counting_channels[axis]
channel.is_counting = False
self._updateChannelValue(axis, elapsed_time)
self.counting_channels.pop(axis)
if self._synchronization in (AcqSynch.HardwareStart,
AcqSynch.HardwareTrigger,
AcqSynch.HardwareGate):
self._disconnect_hardware_synchronization()
self._armed = False
self.start_time = None
def AbortOne(self, axis):
if axis not in self.counting_channels:
return
now = time.time()
if self.start_time is not None:
elapsed_time = now - self.start_time
else:
elapsed_time = 0
self._finish(elapsed_time, axis)
def getAmplitude(self, axis):
idx = axis - 1
channel = self.channels[idx]
return channel.amplitude.get_value_name()
def setAmplitude(self, axis, value):
idx = axis - 1
channel = self.channels[idx]
klass = BaseValue
if value.startswith("tango://"):
klass = TangoValue
channel.amplitude = klass(value)
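    # Illustrative usage (hypothetical attribute name): setAmplitude(1, "tango://sys/tg_test/1/ampli")
    # makes axis 1 read its amplitude from that Tango attribute via TangoValue, whereas
    # setAmplitude(1, "5.0") keeps a constant BaseValue of 5.0.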
def getRoI(self, axis):
idx = axis - 1
channel = self.channels[idx]
return channel.roi
def setRoI(self, axis, value):
idx = axis - 1
channel = self.channels[idx]
try:
value = value.tolist()
except AttributeError:
pass
if len(value) != 4:
raise ValueError("RoI is not a list of four elements")
if any(not isinstance(v, int) for v in value):
raise ValueError("RoI is not a list of integers")
if value != [0, 0, 0, 0]:
if value[1] <= value[0]:
raise ValueError("RoI[1] is lower or equal than RoI[0]")
if value[3] <= value[2]:
raise ValueError("RoI[3] is lower or equal than RoI[2]")
x_dim = self.BufferSize[0]
if value[0] > (x_dim - 1):
raise ValueError(
"RoI[0] exceeds detector X dimension - 1 ({})".format(
x_dim - 1))
            if value[1] > (x_dim - 1):
                raise ValueError(
                    "RoI[1] exceeds detector X dimension - 1 ({})".format(
                        x_dim - 1))
# Copyright (c) 2020, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import getpass, json, logging, os, pwd, re, socket, subprocess, sys
import sqlparse
from sqlparse import sql, tokens
import tero, tero.dstamp
def _load_sqlschema(schema_text):
tables = {}
statements = sqlparse.parse(schema_text)
for statement in statements:
statement_tokens = statement.tokens # XXX list(statement.flatten())
# CREATE
token = statement_tokens.pop(0) if statement_tokens else None
while token and not token.match(tokens.Keyword.DDL, values=('CREATE',)):
token = statement_tokens.pop(0) if statement_tokens else None
if not token:
continue
# TABLE
token = statement_tokens.pop(0) if statement_tokens else None
while token and not token.match(tokens.Keyword, values=('TABLE',)):
token = statement_tokens.pop(0) if statement_tokens else None
if not token:
continue
# identifier
token = statement_tokens.pop(0) if statement_tokens else None
while token and not isinstance(token, sql.Identifier):
token = statement_tokens.pop(0) if statement_tokens else None
if not token:
continue
table_identifier = token.value
# fields
tables[table_identifier] = {}
logging.warning("CREATE TABLE %s", table_identifier)
token = statement_tokens.pop(0) if statement_tokens else None
while token and not isinstance(token, sql.Parenthesis):
token = statement_tokens.pop(0) if statement_tokens else None
if not token:
continue
field_tokens = list(token.flatten()) # XXX token.tokens
while field_tokens:
field_name = None
field_type = None
field_modifier = False
field_length = None
field_not_null = False
# field identifier
field_token = field_tokens.pop(0) if field_tokens else None
while field_token and not (
field_token.match(tokens.Name, values=None) or
field_token.match(tokens.Name.Builtin, values=None) or
field_token.match(tokens.Keyword, values=None)):
field_token = field_tokens.pop(0) if field_tokens else None
if field_token.match(tokens.Keyword, values=('CONSTRAINT',)):
while field_token and not (
field_token.match(tokens.Punctuation, values=(',',))):
field_token = field_tokens.pop(0) if field_tokens else None
if field_token:
continue
if not field_token:
continue
field_name = field_token.value
# field type
field_token = field_tokens.pop(0) if field_tokens else None
while field_token and not (
field_token.match(tokens.Name.Builtin, values=None) or
field_token.match(tokens.Keyword, values=None)):
field_token = field_tokens.pop(0) if field_tokens else None
if not field_token:
continue
field_type = field_token
# `character` is followed by `varying`
field_token = field_tokens.pop(0) if field_tokens else None
while field_token:
if field_token.match(tokens.Name, values=('varying',)):
while field_token and not (field_token.match(
tokens.Token.Literal.Number.Integer, values=None)):
field_token = (
field_tokens.pop(0) if field_tokens else None)
field_length = int(field_token.value)
field_token = field_tokens.pop(0) if field_tokens else None
elif field_token.match(tokens.Keyword.CTE, values=('WITH',)):
field_modifier = True
field_token = field_tokens.pop(0) if field_tokens else None
elif field_token.match(tokens.Keyword, values=('NOT NULL',)):
field_not_null = True
field_token = field_tokens.pop(0) if field_tokens else None
elif field_token.match(tokens.Punctuation, values=(',',)):
break
else:
field_token = field_tokens.pop(0) if field_tokens else None
tables[table_identifier][field_name] = {
'type': field_type.value,
'required': field_not_null
}
if field_modifier:
tables[table_identifier][field_name].update({
'timezone': True
})
if field_length:
tables[table_identifier][field_name].update({
'length': field_length
})
logging.warning('- "%s" %s%s%s%s',
field_name,
field_type,
" WITH XXX" if field_modifier else "",
" varying(%d)" % field_length if field_length else "",
" NOT NULL" if field_not_null else "")
return tables
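# Illustrative result (exact token classification depends on sqlparse): for a
# pg_dump-style statement such as
#   CREATE TABLE users (
#       email character varying(254) NOT NULL
#   );
# the returned dict maps 'users' to
#   {'email': {'type': 'character', 'required': True, 'length': 254}}.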
def check_apps(reference_prerequisites=None, root_dir='/var/www',
write_to_file=False):
"""
Check versions of apps currently running on the machine.
"""
apps = find_apps(root_dir)
checked = True
for app_name, app_snap in apps.items():
app_prerequisites = app_snap['dependencies']
if write_to_file:
app_schema_path = '%s-prerequisites.json' % app_name
logging.warning("saving prerequisites for %s to %s ...",
app_name, app_schema_path)
with open(app_schema_path, 'w') as schema_file:
schema_file.write(json.dumps(app_prerequisites, indent=2))
print("App %s:" % str(app_name))
if not reference_prerequisites:
continue
added_prerequisites = (
set(app_prerequisites) - set(reference_prerequisites))
removed_prerequisites = (
set(reference_prerequisites) - set(app_prerequisites))
if added_prerequisites:
checked = False
print("The following prerequisites were added to the reference:")
for prerequisite in sorted(added_prerequisites):
print("- %s==%s" % (
prerequisite, app_prerequisites[prerequisite]))
if removed_prerequisites:
checked = False
print(
"The following prerequisites were removed from the reference:")
for prerequisite in sorted(removed_prerequisites):
print("- %s==%s" % (
prerequisite, reference_prerequisites[prerequisite]))
first_time = True
for prerequisite in sorted(
set(app_prerequisites) & set(reference_prerequisites)):
if (app_prerequisites[prerequisite] !=
reference_prerequisites[prerequisite]):
checked = False
if first_time:
print("The following prerequisites were changed:")
first_time = False
print("- %s version %s, expected version %s" % (
prerequisite, app_prerequisites[prerequisite],
reference_prerequisites[prerequisite]))
return checked
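# Illustrative usage (assumed reference data): check_apps({'python': '3.6.8', 'Django': '2.2.4'})
# walks the apps under /var/www, prints one "App <name>:" block per app and reports
# prerequisites that were added, removed or changed relative to the reference dict,
# returning False if any difference was found.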
def check_permissions(paths, owner, group, mode):
for path in paths:
stat = os.stat(path)
if stat.st_uid != owner:
            sys.stderr.write('owner mismatch: ' + path + '\n')
if stat.st_gid != group:
sys.stderr.write('group mismatch: ' + path + '\n')
if stat.st_mode != mode:
sys.stderr.write('mode mismatch: ' + path + '\n')
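# Illustrative usage (assumed numeric ids): check_permissions(['/etc/passwd'], owner=0, group=0,
# mode=0o100644) -- note that os.stat().st_mode includes the file-type bits, so the expected
# mode must be the full value (e.g. 0o100644 for a regular file), not just the permission bits.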
def check_sqlschema(schema_text, reference_schema=None):
"""
Analyze a SQL schema that was dumped with `pg_dump --schema-only`
"""
schema = _load_sqlschema(schema_text)
if not reference_schema:
logging.warning("There are no reference schema to compare against.")
reference_schema = schema
added_tables = (set(schema) - set(reference_schema))
removed_tables = (set(reference_schema) - set(schema))
if added_tables:
print("The following tables were added to the reference schema:")
for table in sorted(added_tables):
print("- %s" % table)
print("")
if removed_tables:
print("The following tables were removed from the reference schema:")
for table in sorted(removed_tables):
print("- %s" % table)
print("")
for table in sorted(set(schema) & set(reference_schema)):
added_fields = (set(schema[table]) - set(reference_schema[table]))
removed_fields = (set(reference_schema[table]) - set(schema[table]))
altered = []
for field in sorted(set(schema[table]) & set(reference_schema[table])):
if (schema[table][field]['type'] !=
reference_schema[table][field]['type']):
altered += ['"%s" type was altered from %s to %s' % (
field,
reference_schema[table][field]['type'],
schema[table][field]['type'])]
elif (schema[table][field].get('length', 0) !=
reference_schema[table][field].get('length', 0)):
altered += ['"%s" length was altered from %d to %d' % (
field,
reference_schema[table][field].get('length', 0),
schema[table][field].get('length', 0))]
if (schema[table][field]['required'] !=
reference_schema[table][field]['required']):
altered += ['"%s" was altered from %s to %s' % (
field, ("NOT NULL"\
if reference_schema[table][field]['required'] else "NULL"),
"NOT NULL" if schema[table][field]['required'] else "NULL")]
if added_fields or removed_fields or altered:
print('Table "%s" was altered:' % table)
if added_fields:
print('\tThe following fields were added:')
for field in sorted(added_fields):
print("\t- %s%s" % (field, (" NOT NULL"\
if schema[table][field]['required'] else "")))
if removed_fields:
print('\tThe following fields were removed:')
for field in sorted(removed_fields):
print("\t- %s" % field)
if altered:
print('\tThe following fields were altered:')
for field in altered:
print("\t- %s" % field)
print('')
def create_archives(backup_dir, backup_tops):
'''Create an archive out of each backup_top.'''
os.chdir(backup_dir)
for backup_top in backup_tops:
basename = os.path.basename(backup_top)
archive = tero.stampfile(basename)
tero.shell_command(['tar', '--bzip2', '-cf', archive,
'-C', os.path.dirname(backup_top),
'--exclude', 'build/',
basename])
tero.dstamp.cleanup_aged_files(backup_dir)
def fingerprint_fs(context, log_path_prefix, exclude_tops=None):
'''Uses mtree to take a fingerprint of the filesystem and output
the specification file in "*log_path_prefix*.mtree".
If an *exclude_tops* file exists, it contains patterns used to skip
over parts of the filesystem to fingerprint.'''
    if exclude_tops and os.path.exists(exclude_tops):
exclude_tops_flags = " -X " + exclude_tops
else:
exclude_tops_flags = ""
tero.shell_command([os.path.join(context.value('binDir'), 'mtree'),
' -c -K sha1digest -p /',
exclude_tops_flags,
' > ' + os.path.abspath(log_path_prefix + '.mtree')])
def find_apps(root_dir):
"""
Find apps installed in *root_dir*
"""
apps = {}
for app_name in os.listdir(root_dir):
python_version = None
python = os.path.join(root_dir, app_name, 'bin', 'python')
# find python version
if os.path.exists(python):
cmdline = [python, '--version']
freeze_output = subprocess.check_output(cmdline)
look = re.match(r'Python ([0-9]+(\.[0-9]+)*)',
freeze_output.decode('utf-8'))
if look:
python_version = look.group(1)
apps.update({app_name: {
'owner': pwd.getpwuid(os.stat(
os.path.join(root_dir, app_name)).st_uid).pw_name,
'dependencies': {
'python': python_version,
}}})
# find python prerequisites
pip = os.path.join(root_dir, app_name, 'bin', 'pip')
if os.path.exists(pip):
cmdline = [pip, 'freeze']
output_lines = tero.shell_command(cmdline, pat=r'.*')
for line in output_lines:
look = re.match(r'(\S+)==(\S+)', line)
if look:
prerequisite = look.group(1)
version = look.group(2)
apps[app_name]['dependencies'].update({
prerequisite: version})
# find process PID
pid_path = os.path.join(
root_dir, app_name, 'var', 'run', '%s.pid' % app_name)
if os.path.exists(pid_path):
with open(pid_path) as pid_file:
pid = int(pid_file.read())
apps[app_name].update({'pid': pid})
return apps
def find_disk_usage(dist_host):
"""
List information about disk usage
"""
tero.shell_command(['/usr/bin/df', '-lh', '--total'])
def find_privileged_executables(log_path_prefix):
'''Look through the filesystem for executables that have the suid bit
    turned on and executables
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, <NAME> <<EMAIL>>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_dvswitch_uplink_pg
short_description: Manage uplink portgroup configuration of a Distributed Switch
description:
- This module can be used to configure the uplink portgroup of a Distributed Switch.
version_added: 2.8
author:
- <NAME> (@ckotte)
notes:
- Tested on vSphere 6.5 and 6.7
requirements:
- "python >= 2.6"
- PyVmomi
options:
switch:
description:
- The name of the Distributed Switch.
type: str
required: True
aliases: ['dvswitch']
name:
description:
- The name of the uplink portgroup.
- The current name will be used if not specified.
type: str
description:
description:
- The description of the uplink portgroup.
type: str
advanced:
description:
- Dictionary which configures the advanced policy settings for the uplink portgroup.
- 'Valid attributes are:'
- '- C(port_config_reset_at_disconnect) (bool): indicates if the configuration of a port is reset automatically after disconnect. (default: true)'
- '- C(block_override) (bool): indicates if the block policy can be changed per port. (default: true)'
- '- C(netflow_override) (bool): indicates if the NetFlow policy can be changed per port. (default: false)'
- '- C(traffic_filter_override) (bool): indicates if the traffic filter can be changed per port. (default: false)'
- '- C(vendor_config_override) (bool): indicates if the vendor config can be changed per port. (default: false)'
- '- C(vlan_override) (bool): indicates if the vlan can be changed per port. (default: false)'
required: False
default: {
port_config_reset_at_disconnect: True,
block_override: True,
vendor_config_override: False,
vlan_override: False,
netflow_override: False,
traffic_filter_override: False,
}
aliases: ['port_policy']
type: dict
vlan_trunk_range:
description:
- The VLAN trunk range that should be configured with the uplink portgroup.
- 'This can be a combination of multiple ranges and numbers, example: [ 2-3967, 4049-4092 ].'
type: list
default: [ '0-4094' ]
lacp:
description:
- Dictionary which configures the LACP settings for the uplink portgroup.
- The options are only used if the LACP support mode is set to 'basic'.
- 'The following parameters are required:'
- '- C(status) (str): Indicates if LACP is enabled. (default: disabled)'
- '- C(mode) (str): The negotiating state of the uplinks/ports. (default: passive)'
required: False
default: {
status: 'disabled',
mode: 'passive',
}
type: dict
netflow_enabled:
description:
- Indicates if NetFlow is enabled on the uplink portgroup.
type: bool
default: False
block_all_ports:
description:
- Indicates if all ports are blocked on the uplink portgroup.
type: bool
default: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Configure Uplink portgroup
vmware_dvswitch_uplink_pg:
hostname: '{{ inventory_hostname }}'
username: '{{ vcsa_username }}'
password: '{{ <PASSWORD> }}'
switch: dvSwitch
name: dvSwitch-DVUplinks
advanced:
port_config_reset_at_disconnect: True
block_override: True
vendor_config_override: False
vlan_override: False
netflow_override: False
traffic_filter_override: False
vlan_trunk_range:
- '0-4094'
netflow_enabled: False
block_all_ports: False
delegate_to: localhost
- name: Enabled LACP on Uplink portgroup
vmware_dvswitch_uplink_pg:
hostname: '{{ inventory_hostname }}'
username: '{{ vcsa_username }}'
password: '{{ <PASSWORD> }}'
switch: dvSwitch
lacp:
status: enabled
mode: active
delegate_to: localhost
'''
RETURN = """
result:
description: information about performed operation
returned: always
type: str
sample: {
"adv_block_ports": true,
"adv_netflow": false,
"adv_reset_at_disconnect": true,
"adv_traffic_filtering": false,
"adv_vendor_conf": false,
"adv_vlan": false,
"block_all_ports": false,
"changed": false,
"description": null,
"dvswitch": "dvSwitch",
"lacp_status": "disabled",
"lacp_status_previous": "enabled",
"name": "dvSwitch-DVUplinks",
"netflow_enabled": false,
"result": "Uplink portgroup already configured properly",
"vlan_trunk_range": [
"2-3967",
"4049-4092"
]
}
"""
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.vmware import (
PyVmomi, TaskError, find_dvs_by_name, vmware_argument_spec, wait_for_task
)
class VMwareDvSwitchUplinkPortgroup(PyVmomi):
"""Class to manage a uplink portgroup on a Distributed Virtual Switch"""
def __init__(self, module):
super(VMwareDvSwitchUplinkPortgroup, self).__init__(module)
self.switch_name = self.module.params['switch']
self.uplink_pg_name = self.params['name']
self.uplink_pg_description = self.params['description']
self.uplink_pg_reset = self.params['advanced'].get('port_config_reset_at_disconnect')
self.uplink_pg_block_ports = self.params['advanced'].get('block_override')
self.uplink_pg_vendor_conf = self.params['advanced'].get('vendor_config_override')
self.uplink_pg_vlan = self.params['advanced'].get('vlan_override')
self.uplink_pg_netflow = self.params['advanced'].get('netflow_override')
self.uplink_pg_tf = self.params['advanced'].get('traffic_filter_override')
self.uplink_pg_vlan_trunk_range = self.params['vlan_trunk_range']
self.uplink_pg_netflow_enabled = self.params['netflow_enabled']
self.uplink_pg_block_all_ports = self.params['block_all_ports']
self.lacp_status = self.params['lacp'].get('status')
self.lacp_mode = self.params['lacp'].get('mode')
self.dvs = find_dvs_by_name(self.content, self.switch_name)
if self.dvs is None:
self.module.fail_json(msg="Failed to find DVS %s" % self.switch_name)
self.support_mode = self.dvs.config.lacpApiVersion
def ensure(self):
"""Manage uplink portgroup"""
changed = changed_uplink_pg_policy = changed_vlan_trunk_range = changed_lacp = False
results = dict(changed=changed)
results['dvswitch'] = self.switch_name
changed_list = []
uplink_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
# Use the same version in the new spec; The version will be increased by one by the API automatically
uplink_pg_spec.configVersion = self.dvs.config.uplinkPortgroup[0].config.configVersion
uplink_pg_config = self.dvs.config.uplinkPortgroup[0].config
# Check name
if self.uplink_pg_name:
results['name'] = self.uplink_pg_name
if uplink_pg_config.name != self.uplink_pg_name:
changed = True
changed_list.append("name")
results['name_previous'] = uplink_pg_config.name
uplink_pg_spec.name = self.uplink_pg_name
else:
results['name'] = uplink_pg_config.name
# Check description
results['description'] = self.uplink_pg_description
if uplink_pg_config.description != self.uplink_pg_description:
changed = True
changed_list.append("description")
results['description_previous'] = uplink_pg_config.description
uplink_pg_spec.description = self.uplink_pg_description
# Check port policies
results['adv_reset_at_disconnect'] = self.uplink_pg_reset
results['adv_block_ports'] = self.uplink_pg_block_ports
results['adv_vendor_conf'] = self.uplink_pg_vendor_conf
results['adv_vlan'] = self.uplink_pg_vlan
results['adv_netflow'] = self.uplink_pg_netflow
results['adv_traffic_filtering'] = self.uplink_pg_tf
uplink_pg_policy_spec = vim.dvs.VmwareDistributedVirtualSwitch.VMwarePortgroupPolicy()
uplink_pg_policy_spec.portConfigResetAtDisconnect = self.uplink_pg_reset
uplink_pg_policy_spec.blockOverrideAllowed = self.uplink_pg_block_ports
uplink_pg_policy_spec.vendorConfigOverrideAllowed = self.uplink_pg_vendor_conf
uplink_pg_policy_spec.vlanOverrideAllowed = self.uplink_pg_vlan
uplink_pg_policy_spec.ipfixOverrideAllowed = self.uplink_pg_netflow
uplink_pg_policy_spec.trafficFilterOverrideAllowed = self.uplink_pg_tf
        # There's no information available on whether the following options are deprecated, but
# they aren't visible in the vSphere Client
uplink_pg_policy_spec.shapingOverrideAllowed = False
uplink_pg_policy_spec.livePortMovingAllowed = False
uplink_pg_policy_spec.uplinkTeamingOverrideAllowed = False
uplink_pg_policy_spec.securityPolicyOverrideAllowed = False
uplink_pg_policy_spec.networkResourcePoolOverrideAllowed = False
# Check policies
if uplink_pg_config.policy.portConfigResetAtDisconnect != self.uplink_pg_reset:
changed_uplink_pg_policy = True
results['adv_reset_at_disconnect_previous'] = uplink_pg_config.policy.portConfigResetAtDisconnect
if uplink_pg_config.policy.blockOverrideAllowed != self.uplink_pg_block_ports:
changed_uplink_pg_policy = True
results['adv_block_ports_previous'] = uplink_pg_config.policy.blockOverrideAllowed
if uplink_pg_config.policy.vendorConfigOverrideAllowed != self.uplink_pg_vendor_conf:
changed_uplink_pg_policy = True
results['adv_vendor_conf_previous'] = uplink_pg_config.policy.vendorConfigOverrideAllowed
if uplink_pg_config.policy.vlanOverrideAllowed != self.uplink_pg_vlan:
changed_uplink_pg_policy = True
results['adv_vlan_previous'] = uplink_pg_config.policy.vlanOverrideAllowed
if uplink_pg_config.policy.ipfixOverrideAllowed != self.uplink_pg_netflow:
changed_uplink_pg_policy = True
results['adv_netflow_previous'] = uplink_pg_config.policy.ipfixOverrideAllowed
if uplink_pg_config.policy.trafficFilterOverrideAllowed != self.uplink_pg_tf:
changed_uplink_pg_policy = True
results['adv_traffic_filtering_previous'] = uplink_pg_config.policy.trafficFilterOverrideAllowed
if changed_uplink_pg_policy:
changed = True
changed_list.append("advanced")
uplink_pg_spec.policy = uplink_pg_policy_spec
uplink_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
# Check VLAN trunk
results['vlan_trunk_range'] = self.uplink_pg_vlan_trunk_range
vlan_id_ranges = self.uplink_pg_vlan_trunk_range
trunk_vlan_spec = vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec()
vlan_id_list = []
for vlan_id_range in vlan_id_ranges:
vlan_id_range_found = False
vlan_id_start, vlan_id_end = self.get_vlan_ids_from_range(vlan_id_range)
# Check if range is already configured
for current_vlan_id_range in uplink_pg_config.defaultPortConfig.vlan.vlanId:
if current_vlan_id_range.start == int(vlan_id_start) and current_vlan_id_range.end == int(vlan_id_end):
vlan_id_range_found = True
break
if vlan_id_range_found is False:
changed_vlan_trunk_range = True
vlan_id_list.append(
vim.NumericRange(start=int(vlan_id_start), end=int(vlan_id_end))
)
# Check if range needs to be removed
for current_vlan_id_range in uplink_pg_config.defaultPortConfig.vlan.vlanId:
vlan_id_range_found = False
for vlan_id_range in vlan_id_ranges:
vlan_id_start, vlan_id_end = self.get_vlan_ids_from_range(vlan_id_range)
if (current_vlan_id_range.start == int(vlan_id_start)
and current_vlan_id_range.end == int(vlan_id_end)):
vlan_id_range_found = True
break
if vlan_id_range_found is False:
changed_vlan_trunk_range = True
trunk_vlan_spec.vlanId = vlan_id_list
if changed_vlan_trunk_range:
changed = True
changed_list.append("vlan trunk range")
current_vlan_id_list = []
for current_vlan_id_range in uplink_pg_config.defaultPortConfig.vlan.vlanId:
if current_vlan_id_range.start == current_vlan_id_range.end:
current_vlan_id_range_string = current_vlan_id_range.start
else:
current_vlan_id_range_string = '-'.join(
[str(current_vlan_id_range.start), str(current_vlan_id_range.end)]
)
current_vlan_id_list.append(current_vlan_id_range_string)
results['vlan_trunk_range_previous'] = current_vlan_id_list
uplink_pg_spec.defaultPortConfig.vlan = trunk_vlan_spec
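        # Illustrative example: with vlan_trunk_range ['2-3967', '4049-4092'] and a current
        # config of ['0-4094'], neither desired range is found and the old range is no longer
        # wanted, so the spec is rebuilt with NumericRange(2, 3967) and NumericRange(4049, 4092).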
# Check LACP
lacp_support_mode = self.get_lacp_support_mode(self.support_mode)
if lacp_support_mode == 'basic':
results['lacp_status'] = self.lacp_status
lacp_spec = vim.dvs.VmwareDistributedVirtualSwitch.UplinkLacpPolicy()
lacp_enabled = False
if self.lacp_status == 'enabled':
lacp_enabled = True
if uplink_pg_config.defaultPortConfig.lacpPolicy.enable.value != lacp_enabled:
changed_lacp = True
changed_list.append("lacp status")
if uplink_pg_config.defaultPortConfig.lacpPolicy.enable.value:
results['lacp_status_previous'] = 'enabled'
else:
results['lacp_status_previous'] = 'disabled'
lacp_spec.enable = vim.BoolPolicy()
lacp_spec.enable.inherited = False
lacp_spec.enable.value = lacp_enabled
if lacp_enabled and uplink_pg_config.defaultPortConfig.lacpPolicy.mode.value != self.lacp_mode:
results['lacp_mode'] = self.lacp_mode
changed_lacp = True
changed_list.append("lacp mode")
results['lacp_mode_previous'] = uplink_pg_config.defaultPortConfig.lacpPolicy.mode.value
lacp_spec.mode = vim.StringPolicy()
lacp_spec.mode.inherited = False
lacp_spec.mode.value = self.lacp_mode
if changed_lacp:
changed = True
uplink_pg_spec.defaultPortConfig.lacpPolicy = lacp_spec
# Check NetFlow
results['netflow_enabled'] = self.uplink_pg_netflow_enabled
netflow_enabled_spec = vim.BoolPolicy()
netflow_enabled_spec.inherited = False
netflow_enabled_spec.value = self.uplink_pg_netflow_enabled
if uplink_pg_config.defaultPortConfig.ipfixEnabled.value != self.uplink_pg_netflow_enabled:
changed = True
results['netflow_enabled_previous'] = uplink_pg_config.defaultPortConfig.ipfixEnabled.value
changed_list.append("netflow")
uplink_pg_spec.defaultPortConfig.ipfixEnabled = netflow_enabled_spec
# TODO: Check Traffic filtering and marking
# Check Block all ports
results['block_all_ports'] = self.uplink_pg_block_all_ports
block_all_ports_spec = vim.BoolPolicy()
block_all_ports_spec.inherited = False
block_all_ports_spec.value = self.uplink_pg_block_all_ports
if uplink_pg_config.defaultPortConfig.blocked.value != self.uplink_pg_block_all_ports:
changed = True
changed_list.append("block all ports")
results['block_all_ports_previous'] = uplink_pg_config.defaultPortConfig.blocked.value
uplink_pg_spec.defaultPortConfig.blocked = block_all_ports_spec
if changed:
if self.module.check_mode:
changed_suffix = ' would be changed'
else:
changed_suffix = ' changed'
if len(changed_list) > 2:
message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1])
elif len(changed_list) == 2:
message = ' and '.join(changed_list)
elif len(changed_list) == 1:
message = changed_list[0]
message += changed_suffix
if not self.module.check_mode:
try:
task = self.dvs.config.uplinkPortgroup[0].ReconfigureDVPortgroup_Task(uplink_pg_spec)
wait_for_task(task)
except TaskError as invalid_argument:
self.module.fail_json(msg="Failed to update uplink portgroup : %s" % to_native(invalid_argument))
else:
message = "Uplink portgroup already configured properly"
results['changed'] = changed
results['result'] = message
self.module.exit_json(**results)
@staticmethod
def get_vlan_ids_from_range(vlan_id_range):
"""Get start and end VLAN ID from VLAN ID range"""
try:
vlan_id_start, vlan_id_end = vlan_id_range.split('-')
except (AttributeError, TypeError):
vlan_id_start = vlan_id_end = vlan_id_range
  tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name,
"RetrieveTPUEmbeddingAdagradParametersGradAccumDebug", name,
tld.op_callbacks, "table_id", table_id, "table_name", table_name,
"num_shards", num_shards, "shard_id", shard_id, "config", config)
_result = _RetrieveTPUEmbeddingAdagradParametersGradAccumDebugOutput._make(_result)
return _result
except _core._FallbackException:
try:
return retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_eager_fallback(
table_id=table_id, table_name=table_name, num_shards=num_shards,
shard_id=shard_id, config=config, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
# Add nodes to the TensorFlow graph.
num_shards = _execute.make_int(num_shards, "num_shards")
shard_id = _execute.make_int(shard_id, "shard_id")
if table_id is None:
table_id = -1
table_id = _execute.make_int(table_id, "table_id")
if table_name is None:
table_name = ""
table_name = _execute.make_str(table_name, "table_name")
if config is None:
config = ""
config = _execute.make_str(config, "config")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"RetrieveTPUEmbeddingAdagradParametersGradAccumDebug", num_shards=num_shards,
shard_id=shard_id,
table_id=table_id,
table_name=table_name,
config=config,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("table_id", _op._get_attr_int("table_id"), "table_name",
_op.get_attr("table_name"), "num_shards",
_op._get_attr_int("num_shards"), "shard_id",
_op._get_attr_int("shard_id"), "config", _op.get_attr("config"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"RetrieveTPUEmbeddingAdagradParametersGradAccumDebug", _inputs_flat, _attrs, _result)
_result = _RetrieveTPUEmbeddingAdagradParametersGradAccumDebugOutput._make(_result)
return _result
RetrieveTPUEmbeddingAdagradParametersGradAccumDebug = tf_export("raw_ops.RetrieveTPUEmbeddingAdagradParametersGradAccumDebug")(_ops.to_raw_op(retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug))
def retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_eager_fallback(num_shards, shard_id, table_id, table_name, config, name, ctx):
num_shards = _execute.make_int(num_shards, "num_shards")
shard_id = _execute.make_int(shard_id, "shard_id")
if table_id is None:
table_id = -1
table_id = _execute.make_int(table_id, "table_id")
if table_name is None:
table_name = ""
table_name = _execute.make_str(table_name, "table_name")
if config is None:
config = ""
config = _execute.make_str(config, "config")
_inputs_flat = []
_attrs = ("table_id", table_id, "table_name", table_name, "num_shards",
num_shards, "shard_id", shard_id, "config", config)
_result = _execute.execute(b"RetrieveTPUEmbeddingAdagradParametersGradAccumDebug",
3, inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"RetrieveTPUEmbeddingAdagradParametersGradAccumDebug", _inputs_flat, _attrs, _result)
_result = _RetrieveTPUEmbeddingAdagradParametersGradAccumDebugOutput._make(_result)
return _result
_RetrieveTPUEmbeddingCenteredRMSPropParametersOutput = collections.namedtuple(
"RetrieveTPUEmbeddingCenteredRMSPropParameters",
["parameters", "ms", "mom", "mg"])
def retrieve_tpu_embedding_centered_rms_prop_parameters(num_shards, shard_id, table_id=-1, table_name="", config="", name=None):
r"""Retrieve centered RMSProp embedding parameters.
An op that retrieves optimization parameters from embedding to host
memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
the correct embedding table configuration. For example, this op is
used to retrieve updated parameters before saving a checkpoint.
Args:
num_shards: An `int`.
shard_id: An `int`.
table_id: An optional `int` that is `>= -1`. Defaults to `-1`.
table_name: An optional `string`. Defaults to `""`.
config: An optional `string`. Defaults to `""`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (parameters, ms, mom, mg).
parameters: A `Tensor` of type `float32`.
ms: A `Tensor` of type `float32`.
mom: A `Tensor` of type `float32`.
mg: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name,
"RetrieveTPUEmbeddingCenteredRMSPropParameters", name,
tld.op_callbacks, "table_id", table_id, "table_name", table_name,
"num_shards", num_shards, "shard_id", shard_id, "config", config)
_result = _RetrieveTPUEmbeddingCenteredRMSPropParametersOutput._make(_result)
return _result
except _core._FallbackException:
try:
return retrieve_tpu_embedding_centered_rms_prop_parameters_eager_fallback(
table_id=table_id, table_name=table_name, num_shards=num_shards,
shard_id=shard_id, config=config, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
# Add nodes to the TensorFlow graph.
num_shards = _execute.make_int(num_shards, "num_shards")
shard_id = _execute.make_int(shard_id, "shard_id")
if table_id is None:
table_id = -1
table_id = _execute.make_int(table_id, "table_id")
if table_name is None:
table_name = ""
table_name = _execute.make_str(table_name, "table_name")
if config is None:
config = ""
config = _execute.make_str(config, "config")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"RetrieveTPUEmbeddingCenteredRMSPropParameters", num_shards=num_shards,
shard_id=shard_id,
table_id=table_id,
table_name=table_name,
config=config,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("table_id", _op._get_attr_int("table_id"), "table_name",
_op.get_attr("table_name"), "num_shards",
_op._get_attr_int("num_shards"), "shard_id",
_op._get_attr_int("shard_id"), "config", _op.get_attr("config"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"RetrieveTPUEmbeddingCenteredRMSPropParameters", _inputs_flat, _attrs, _result)
_result = _RetrieveTPUEmbeddingCenteredRMSPropParametersOutput._make(_result)
return _result
RetrieveTPUEmbeddingCenteredRMSPropParameters = tf_export("raw_ops.RetrieveTPUEmbeddingCenteredRMSPropParameters")(_ops.to_raw_op(retrieve_tpu_embedding_centered_rms_prop_parameters))
def retrieve_tpu_embedding_centered_rms_prop_parameters_eager_fallback(num_shards, shard_id, table_id, table_name, config, name, ctx):
num_shards = _execute.make_int(num_shards, "num_shards")
shard_id = _execute.make_int(shard_id, "shard_id")
if table_id is None:
table_id = -1
table_id = _execute.make_int(table_id, "table_id")
if table_name is None:
table_name = ""
table_name = _execute.make_str(table_name, "table_name")
if config is None:
config = ""
config = _execute.make_str(config, "config")
_inputs_flat = []
_attrs = ("table_id", table_id, "table_name", table_name, "num_shards",
num_shards, "shard_id", shard_id, "config", config)
_result = _execute.execute(b"RetrieveTPUEmbeddingCenteredRMSPropParameters",
4, inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"RetrieveTPUEmbeddingCenteredRMSPropParameters", _inputs_flat, _attrs, _result)
_result = _RetrieveTPUEmbeddingCenteredRMSPropParametersOutput._make(_result)
return _result
_RetrieveTPUEmbeddingFTRLParametersOutput = collections.namedtuple(
"RetrieveTPUEmbeddingFTRLParameters",
["parameters", "accumulators", "linears"])
def retrieve_tpu_embedding_ftrl_parameters(num_shards, shard_id, table_id=-1, table_name="", config="", name=None):
r"""Retrieve FTRL embedding parameters.
An op that retrieves optimization parameters from embedding to host
memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
the correct embedding table configuration. For example, this op is
used to retrieve updated parameters before saving a checkpoint.
Args:
num_shards: An `int`.
shard_id: An `int`.
table_id: An optional `int` that is `>= -1`. Defaults to `-1`.
table_name: An optional `string`. Defaults to `""`.
config: An optional `string`. Defaults to `""`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (parameters, accumulators, linears).
parameters: A `Tensor` of type `float32`.
accumulators: A `Tensor` of type `float32`.
linears: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name,
"RetrieveTPUEmbeddingFTRLParameters", name, tld.op_callbacks,
"table_id", table_id, "table_name", table_name, "num_shards",
num_shards, "shard_id", shard_id, "config", config)
_result = _RetrieveTPUEmbeddingFTRLParametersOutput._make(_result)
return _result
except _core._FallbackException:
try:
return retrieve_tpu_embedding_ftrl_parameters_eager_fallback(
table_id=table_id, table_name=table_name, num_shards=num_shards,
shard_id=shard_id, config=config, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
# Add nodes to the TensorFlow graph.
num_shards = _execute.make_int(num_shards, "num_shards")
shard_id = _execute.make_int(shard_id, "shard_id")
if table_id is None:
table_id = -1
table_id = _execute.make_int(table_id, "table_id")
if table_name is None:
table_name = ""
table_name = _execute.make_str(table_name, "table_name")
if config is None:
config = ""
config = _execute.make_str(config, "config")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"RetrieveTPUEmbeddingFTRLParameters", num_shards=num_shards,
shard_id=shard_id,
table_id=table_id,
table_name=table_name,
config=config, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("table_id", _op._get_attr_int("table_id"), "table_name",
_op.get_attr("table_name"), "num_shards",
_op._get_attr_int("num_shards"), "shard_id",
_op._get_attr_int("shard_id"), "config", _op.get_attr("config"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"RetrieveTPUEmbeddingFTRLParameters", _inputs_flat, _attrs, _result)
_result = _RetrieveTPUEmbeddingFTRLParametersOutput._make(_result)
return _result
RetrieveTPUEmbeddingFTRLParameters = tf_export("raw_ops.RetrieveTPUEmbeddingFTRLParameters")(_ops.to_raw_op(retrieve_tpu_embedding_ftrl_parameters))
def retrieve_tpu_embedding_ftrl_parameters_eager_fallback(num_shards, shard_id, table_id, table_name, config, name, ctx):
num_shards = _execute.make_int(num_shards, "num_shards")
shard_id = _execute.make_int(shard_id, "shard_id")
if table_id is None:
table_id = -1
table_id = _execute.make_int(table_id, "table_id")
if table_name is None:
table_name = ""
table_name = _execute.make_str(table_name, "table_name")
if config is None:
config = ""
config = _execute.make_str(config, "config")
_inputs_flat = []
_attrs = ("table_id", table_id, "table_name", table_name, "num_shards",
num_shards, "shard_id", shard_id, "config", config)
_result = _execute.execute(b"RetrieveTPUEmbeddingFTRLParameters", 3,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"RetrieveTPUEmbeddingFTRLParameters", _inputs_flat, _attrs, _result)
_result = _RetrieveTPUEmbeddingFTRLParametersOutput._make(_result)
return _result
_RetrieveTPUEmbeddingFTRLParametersGradAccumDebugOutput = collections.namedtuple(
"RetrieveTPUEmbeddingFTRLParametersGradAccumDebug",
["parameters", "accumulators", "linears", "gradient_accumulators"])
def retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug(num_shards, shard_id, table_id=-1, table_name="", config="", name=None):
r"""Retrieve FTRL embedding parameters with debug support.
An op that retrieves optimization parameters from embedding to host
memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
the correct embedding table configuration. For example, this op is
used to retrieve updated parameters before saving a checkpoint.
Args:
num_shards: An `int`.
shard_id: An `int`.
table_id: An optional `int` that is `>= -1`. Defaults to `-1`.
table_name: An optional `string`. Defaults to `""`.
config: An optional `string`. Defaults to `""`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (parameters, accumulators, linears, gradient_accumulators).
parameters: A `Tensor` of type `float32`.
accumulators: A `Tensor` of type `float32`.
linears: A `Tensor` of type `float32`.
gradient_accumulators: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name,
"RetrieveTPUEmbeddingFTRLParametersGradAccumDebug", name,
tld.op_callbacks, "table_id", table_id, "table_name", table_name,
"num_shards", num_shards, "shard_id", shard_id, "config", config)
_result = _RetrieveTPUEmbeddingFTRLParametersGradAccumDebugOutput._make(_result)
return _result
except _core._FallbackException:
try:
return retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_eager_fallback(
table_id=table_id, table_name=table_name, num_shards=num_shards,
shard_id=shard_id, config=config, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
# Add nodes to the TensorFlow graph.
num_shards = _execute.make_int(num_shards, "num_shards")
shard_id = _execute.make_int(shard_id, "shard_id")
if table_id is None:
table_id = -1
table_id = _execute.make_int(table_id, "table_id")
if table_name is None:
table_name = ""
table_name = _execute.make_str(table_name, "table_name")
if config is None:
config = ""
config = _execute.make_str(config, "config")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"RetrieveTPUEmbeddingFTRLParametersGradAccumDebug", num_shards=num_shards,
shard_id=shard_id,
table_id=table_id,
table_name=table_name,
config=config,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("table_id", _op._get_attr_int("table_id"), "table_name",
_op.get_attr("table_name"), "num_shards",
_op._get_attr_int("num_shards"), "shard_id",
_op._get_attr_int("shard_id"), "config", _op.get_attr("config"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"RetrieveTPUEmbeddingFTRLParametersGradAccumDebug", _inputs_flat, _attrs, _result)
_result = _RetrieveTPUEmbeddingFTRLParametersGradAccumDebugOutput._make(_result)
return _result
RetrieveTPUEmbeddingFTRLParametersGradAccumDebug = tf_export("raw_ops.RetrieveTPUEmbeddingFTRLParametersGradAccumDebug")(_ops.to_raw_op(retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug))
def retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_eager_fallback(num_shards, shard_id, table_id, table_name, config, name, ctx):
num_shards = _execute.make_int(num_shards, "num_shards")
shard_id = _execute.make_int(shard_id, "shard_id")
if table_id is None:
table_id = -1
table_id = _execute.make_int(table_id, "table_id")
  if table_name is None:
#!/usr/bin/python
"""
Python class that handles projection calculations, operations and augmentation special effects.
"""
import cv2
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.patches import Polygon
class ProjectionManager:
# Initialize projectionManager
def __init__(self, camCal, keepN=10, gradientLevel=75, debug=False):
# set debugging
self.debug = debug
# frameNumber
self.curFrame = None
# keep last N
self.keepN = keepN
# keep our own copy of the camera calibration
self.camCal = camCal
# our own copy of the camera calibration results
self.mtx, self.dist, self.img_size = camCal.get()
# normal image size
self.x, self.y = self.img_size
# based on hough3 (default)
self.z = self.y / 45
# projection mask calculations
self.xbottom1 = int(self.x / 16)
self.xbottom2 = int(self.x * 15 / 16)
self.xtop1 = int(self.x * 14 / 32)
self.xtop2 = int(self.x * 18 / 32)
self.ybottom1 = self.y
self.ybottom2 = self.y
self.ytopbox = int(self.y * 9 / 16)
# mid point in picture (by height)
self.mid = int(self.y / 2)
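        # Worked example (assuming a 1280x720 calibration): xbottom1=80, xbottom2=1200,
        # xtop1=560, xtop2=720, ytopbox=405 and mid=360, i.e. a trapezoid roughly covering
        # the road surface in the lower half of the frame.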
# ghosting
self.roadGhost = np.zeros((self.mid, self.x), dtype=np.uint8)
# gradient level starts here
self.gradient0 = self.mid + gradientLevel
# current image Filter
self.curImgFtr = None
# current road corners
self.curSrcRoadCorners = None
# current horizon
self.curHorizon = None
# current gradient
self.curGradient = None
# last n projected image filters
self.recentProjected = []
# last n road corners
self.recentRoadCorners = []
# last n horizon detected
self.recentHorizon = []
# last n gradient detected
self.recentGradient = []
# for 3D reconstruction and augmentation
self.rvecs = None
self.tvecs = None
self.inliers = None
# our projection settings - FULLHD 1080p on its side.
self.projectedX = 1080
self.projectedY = 1920
        # US highway lane width: 12 feet
self.lane_width = 12
# scaling for display
self.scale_factor = 6.0
# set up debugging diag screens
if self.debug:
self.diag1 = np.zeros((self.mid, self.x, 3), dtype=np.float32)
self.diag2 = np.zeros((self.y, self.x, 3), dtype=np.float32)
self.diag3 = np.zeros((self.projectedY, self.projectedX, 3), dtype=np.float32)
self.diag4 = np.zeros((self.projectedY, self.projectedX, 3), dtype=np.float32)
# set current image filter
def set_image_filter(self, imgFtr):
self.curImgFtr = imgFtr
# create a region of interest mask
def region_of_interest(self, img, vertices):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
"""
# defining a blank mask to start with
mask = np.zeros_like(img)
# defining a 3 channel or 1 channel color to fill the mask with
# depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
# filling pixels inside the polygon defined by "vertices" with the fill
# color
cv2.fillPoly(mask, vertices, ignore_mask_color)
# returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
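    # Usage sketch (illustrative): mask a camera frame down to the projection trapezoid
    # built from the corner attributes computed in __init__, e.g.
    #   vertices = np.array([[(self.xbottom1, self.ybottom1), (self.xtop1, self.ytopbox),
    #                         (self.xtop2, self.ytopbox), (self.xbottom2, self.ybottom2)]],
    #                       dtype=np.int32)
    #   masked = self.region_of_interest(frame, vertices)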
# draw outline of given area
def draw_area_of_interest(self, img, areas, color=(128, 0, 128), thickness=2):
for points in areas:
for i in range(len(points) - 1):
cv2.line(img,
(points[i][0],
points[i][1]),
(points[i + 1][0],
points[i + 1][1]), color, thickness)
cv2.line(img,
(points[0][0],
points[0][1]),
(points[len(points) - 1][0],
points[len(points) - 1][1]), color, thickness)
# draw outline of given area
def draw_area_of_interest_for_projection(self, img, areas, color=(128, 0, 128), thickness1=2, thickness2=10):
for points in areas:
for i in range(len(points) - 1):
if i == 0 or i == 1:
cv2.line(img, (points[i][0], points[i][1]), (points[
i + 1][0], points[i + 1][1]), color, thickness1)
else:
cv2.line(img, (points[i][0], points[i][1]), (points[
i + 1][0], points[i + 1][1]), color, thickness2)
cv2.line(img,
(points[0][0],
points[0][1]),
(points[len(points) - 1][0],
points[len(points) - 1][1]), color, thickness1)
def draw_masked_area(self, img, areas, color=(128, 0, 128), thickness=2):
for points in areas:
for i in range(len(points) - 1):
cv2.line(img,
(points[i][0],
points[i][1]),
(points[i + 1][0],
points[i + 1][1]), color, thickness)
cv2.line(img,
(points[0][0],
points[0][1]),
(points[len(points) - 1][0],
points[len(points) - 1][1]), color, thickness)
def draw_bounding_box(self, img, boundingbox, color=(0, 255, 0), thickness=6):
x1, y1, x2, y2 = boundingbox
cv2.line(img, (x1, y1), (x2, y1), color, thickness)
cv2.line(img, (x2, y1), (x2, y2), color, thickness)
cv2.line(img, (x2, y2), (x1, y2), color, thickness)
cv2.line(img, (x1, y2), (x1, y1), color, thickness)
# draw parallel lines in a perspective image that will later be projected
# into a flat surface
def draw_parallel_lines_pre_projection(self, img, lane_info, color=(128, 0, 0), thickness=5):
lx1 = lane_info[3][0]
rx1 = lane_info[4][0]
rx2 = lane_info[5][0]
lx2 = lane_info[6][0]
ly1 = lane_info[3][1]
ry1 = lane_info[4][1]
ry2 = lane_info[5][1]
ly2 = lane_info[6][1]
cv2.line(img, (lx1, ly1), (lx2, ly2), color, thickness)
cv2.line(img, (rx1, ry1), (rx2, ry2), color, thickness)
def draw_estimated_lane_line_location(self, img, base_pos, distance, color=(128, 0, 0), thickness=5):
x = int(base_pos + distance)
y1 = self.projectedY - 750
y2 = self.projectedY
cv2.line(img, (x, y1), (x, y2), color, thickness)
def draw_lines(self, img, lines, thickness=6, backoff=0, debug=False):
"""
        Calculate and draw the initial estimated lane lines on the roadway.
        :param img: image to draw the estimated lane lines onto
        :param lines: Hough line segments, each given as [[x1, y1, x2, y2]]
        :param thickness: line thickness in pixels
        :param backoff: offset from the estimated vanishing point; defaults to thickness * 2
        :param debug: if True, also draw every candidate segment that was considered
        :return: combined slope (left + right), left slope, right slope, and the four corner points of the estimated lane
"""
if backoff == 0:
backoff = thickness * 2
ysize = img.shape[0]
midleft = img.shape[1] / 2 - 200 + backoff * 2
midright = img.shape[1] / 2 + 200 - backoff * 2
top = ysize / 2 + backoff * 2
rightslopemin = 0.5 # 8/backoff
rightslopemax = 3.0 # backoff/30
leftslopemax = -0.5 # -8/backoff
leftslopemin = -3.0 # -backoff/30
try:
            # right line and left line accumulators
rl = {'num': 0, 'slope': 0.0, 'x1': 0, 'y1': 0, 'x2': 0, 'y2': 0}
ll = {'num': 0, 'slope': 0.0, 'x1': 0, 'y1': 0, 'x2': 0, 'y2': 0}
for line in lines:
for x1, y1, x2, y2 in line:
slope = ((y2 - y1) / (x2 - x1))
sides = (x1 + x2) / 2
vmid = (y1 + y2) / 2
if slope > rightslopemin and slope < rightslopemax and sides > midright and vmid > top: # right
if debug:
cv2.line(img, (x1, y1), (x2, y2), [128, 128, 0], thickness)
rl['num'] += 1
rl['slope'] += slope
rl['x1'] += x1
rl['y1'] += y1
rl['x2'] += x2
rl['y2'] += y2
elif slope > leftslopemin and slope < leftslopemax and sides < midleft and vmid > top: # left
if debug:
cv2.line(img, (x1, y1), (x2, y2), [128, 128, 0], thickness)
ll['num'] += 1
ll['slope'] += slope
ll['x1'] += x1
ll['y1'] += y1
ll['x2'] += x2
ll['y2'] += y2
if rl['num'] > 0 and ll['num'] > 0:
# average/extrapolate all of the lines that makes the right
# line
rslope = rl['slope'] / rl['num']
rx1 = int(rl['x1'] / rl['num'])
ry1 = int(rl['y1'] / rl['num'])
rx2 = int(rl['x2'] / rl['num'])
ry2 = int(rl['y2'] / rl['num'])
# average/extrapolate all of the lines that makes the left line
lslope = ll['slope'] / ll['num']
lx1 = int(ll['x1'] / ll['num'])
ly1 = int(ll['y1'] / ll['num'])
lx2 = int(ll['x2'] / ll['num'])
ly2 = int(ll['y2'] / ll['num'])
xi = int((ly2 - ry2 + rslope * rx2 - lslope * lx2) / (rslope - lslope))
yi = int(ry2 + rslope * (xi - rx2))
# calculate backoff from intercept for right line
if rslope > rightslopemin and rslope < rightslopemax: # right
ry1 = yi + int(backoff)
rx1 = int(rx2 - (ry2 - ry1) / rslope)
ry2 = ysize - 1
rx2 = int(rx1 + (ry2 - ry1) / rslope)
cv2.line(img, (rx1, ry1), (rx2, ry2),
[255, 0, 0], thickness)
# calculate backoff from intercept for left line
if lslope < leftslopemax and lslope > leftslopemin: # left
ly1 = yi + int(backoff)
lx1 = int(lx2 - (ly2 - ly1) / lslope)
ly2 = ysize - 1
lx2 = int(lx1 + (ly2 - ly1) / lslope)
cv2.line(img, (lx1, ly1), (lx2, ly2),
[255, 0, 0], thickness)
# if we have all of the points - draw the backoff line near the
# horizon
if lx1 > 0 and ly1 > 0 and rx1 > 0 and ry1 > 0:
cv2.line(img, (lx1, ly1), (rx1, ry1), [255, 0, 0], thickness)
                # return the combined left and right line slopes, the found rectangular box shape
                # and the estimated vanishing point.
                return lslope + rslope, lslope, rslope, (lx1, ly1), (rx1, ry1), (rx2, ry2), (lx2, ly2)
# Source: cgmeyer/gen3sdk-python
import requests, json, fnmatch, os, os.path, sys, subprocess, glob, ntpath, copy, re, operator, statistics, datetime
import pandas as pd
from os import path
from pandas.io.json import json_normalize
from collections import Counter
from statistics import mean
from io import StringIO
from IPython.utils import io
import numpy as np
import scipy
import matplotlib.pyplot as plt
import seaborn as sns
class Gen3Error(Exception):
pass
class Gen3Expansion:
"""Advanced scripts for interacting with the Gen3 submission, query and index APIs
Supports advanced data submission and exporting from Sheepdog.
Supports paginated GraphQL queries through Peregrine.
Supports Flat Model (ElasticSearch) queries through Arranger/Guppy.
Supports Indexd queries.
Supports user authentication queries.
Args:
endpoint (str): The URL of the data commons.
auth_provider (Gen3Auth): A Gen3Auth class instance.
Examples:
This generates the Gen3Expansion class pointed at the sandbox commons while
using the credentials.json downloaded from the commons profile page.
>>> endpoint = "https://nci-crdc-demo.datacommons.io"
... auth = Gen3Auth(endpoint, refresh_file="credentials.json")
... exp = Gen3Expansion(endpoint, auth)
"""
def __init__(self, endpoint, auth_provider, submission):
self._auth_provider = auth_provider
self._endpoint = endpoint
self.sub = submission # submission is Gen3Submission(endpoint, auth_provider)
def __export_file(self, filename, output):
"""Writes text, e.g., an API response, to a file.
Args:
filename (str): The name of the file to be created.
output (str): The contents of the file to be created.
Example:
>>> output = requests.get(api_url, auth=self._auth_provider).text
... self.__export_file(filename, output)
"""
        with open(filename, "w") as outfile:
            outfile.write(output)
print("Output written to file: " + filename + "\n")
### AWS S3 Tools:
def s3_ls(self, path, bucket, profile, pattern="*"):
""" Print the results of an `aws s3 ls` command """
s3_path = bucket + path
cmd = ["aws", "s3", "ls", s3_path, "--profile", profile]
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode(
"UTF-8"
)
except Exception as e:
output = e.output.decode("UTF-8")
print("ERROR:" + output)
psearch = output.split("\n")
if pattern != "*":
pmatch = fnmatch.filter(
psearch, pattern
) # if default '*', all files will match
return arrayTable(pmatch)
else:
return output
def s3_files(self, path, bucket, profile, pattern="*", verbose=True):
""" Get a list of files returned by an `aws s3 ls` command """
s3_path = bucket + path
cmd = ["aws", "s3", "ls", s3_path, "--profile", profile]
try:
            # cmd is a list of arguments, so shell=True must not be passed
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode(
                "UTF-8"
            )
except Exception as e:
output = e.output.decode("UTF-8")
print("ERROR:" + output)
output = [line.split() for line in output.split("\n")]
output = [
line for line in output if len(line) == 4
] # filter output for lines with file info
output = [line[3] for line in output] # grab the filename only
output = fnmatch.filter(output, pattern) # if default '*', all files will match
if verbose == True:
print("\nIndex \t Filename")
for (i, item) in enumerate(output, start=0):
print(i, "\t", item)
return output
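    # Usage sketch (hypothetical bucket/profile names), assuming `exp` is a Gen3Expansion instance:
    #   tsvs = exp.s3_files(path="my_project/", bucket="s3://my-bucket/", profile="default", pattern="*.tsv")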
def get_s3_files(self, path, bucket, profile, files=None, mydir=None):
""" Transfer data from object storage to the VM in the private subnet """
# Set the path to the directory where files reside
s3_path = bucket + path
# Create folder on VM for downloaded files
if not isinstance(mydir, str):
mydir = path
if not os.path.exists(mydir):
os.makedirs(mydir)
# If files is an array of filenames, download them
if isinstance(files, list):
print("Getting files...")
for filename in files:
s3_filepath = s3_path + str(filename)
if os.path.exists(mydir + str(filename)):
print("File " + filename + " already downloaded in that location.")
else:
print(s3_filepath)
cmd = ["aws", "s3", "--profile", profile, "cp", s3_filepath, mydir]
try:
                        output = subprocess.check_output(
                            cmd, stderr=subprocess.STDOUT
                        ).decode("UTF-8")
except Exception as e:
output = e.output.decode("UTF-8")
print("ERROR:" + output)
        # If files is None, sync the whole s3_path 'directory'
else:
print("Syncing directory " + s3_path)
cmd = ["aws", "s3", "--profile", profile, "sync", s3_path, mydir]
try:
                output = subprocess.check_output(
                    cmd, stderr=subprocess.STDOUT
                ).decode("UTF-8")
except Exception as e:
output = e.output.decode("UTF-8")
print("ERROR:" + output)
print("Finished")
# Functions for downloading metadata in TSVs
def get_project_ids(self, node=None, name=None):
"""Get a list of project_ids you have access to in a data commons.
Args:
node(str): The node you want projects to have at least one record in.
name(str): The name of the programs to get projects in, or the submitter_id of a particular record.
Example:
get_project_ids()
get_project_ids(node='demographic')
get_project_ids(node='program',name=['training','internal'])
get_project_ids(node='case',name='case-01')
"""
project_ids = []
queries = []
# Return all project_ids in the data commons if no node is provided or if node is program but no name provided
if name == None and ((node == None) or (node == "program")):
print("Getting all project_ids you have access to in the data commons.")
if node == "program":
print(
"Specify a list of program names (name = ['myprogram1','myprogram2']) to get only project_ids in particular programs."
)
queries.append("""{project (first:0){project_id}}""")
elif name != None and node == "program":
if isinstance(name, list):
print(
"Getting all project_ids in the programs '" + ",".join(name) + "'"
)
for program_name in name:
queries.append(
"""{project (first:0, with_path_to:{type:"program",name:"%s"}){project_id}}"""
% (program_name)
)
elif isinstance(name, str):
print("Getting all project_ids in the program '" + name + "'")
queries.append(
"""{project (first:0, with_path_to:{type:"program",name:"%s"}){project_id}}"""
% (name)
)
elif isinstance(node, str) and isinstance(name, str):
print(
"Getting all project_ids for projects with a path to record '"
+ name
+ "' in node '"
+ node
+ "'"
)
queries.append(
"""{project (first:0, with_path_to:{type:"%s",submitter_id:"%s"}){project_id}}"""
% (node, name)
)
elif isinstance(node, str) and name == None:
print(
"Getting all project_ids for projects with at least one record in the node '"
+ node
+ "'"
)
query = """{node (first:0,of_type:"%s"){project_id}}""" % (node)
df = json_normalize(self.sub.query(query)["data"]["node"])
project_ids = project_ids + list(set(df["project_id"]))
if len(queries) > 0:
for query in queries:
res = self.sub.query(query)
df = json_normalize(res["data"]["project"])
project_ids = project_ids + list(set(df["project_id"]))
my_ids = sorted(project_ids, key=str.lower)
print(my_ids)
return my_ids
def get_node_tsvs(
self,
node,
projects=None,
overwrite=False,
remove_empty=True,
outdir="node_tsvs",
):
"""Gets a TSV of the structuerd data from particular node for each project specified.
Also creates a master TSV of merged data from each project for the specified node.
Returns a DataFrame containing the merged data for the specified node.
Args:
node (str): The name of the node to download structured data from.
projects (list): The projects to download the node from. If "None", downloads data from each project user has access to.
Example:
>>> df = get_node_tsvs('demographic')
"""
if not os.path.exists(outdir):
os.makedirs(outdir)
mydir = "{}/{}_tsvs".format(outdir, node)
if not os.path.exists(mydir):
os.makedirs(mydir)
if projects == None: # if no projects specified, get node for all projects
projects = list(
json_normalize(
self.sub.query("""{project (first:0){project_id}}""")["data"][
"project"
]
)["project_id"]
)
elif isinstance(projects, str):
projects = [projects]
dfs = []
df_len = 0
for project in projects:
filename = str(mydir + "/" + project + "_" + node + ".tsv")
if (os.path.isfile(filename)) and (overwrite == False):
print("File previously downloaded.")
else:
prog, proj = project.split("-", 1)
self.sub.export_node(prog, proj, node, "tsv", filename)
df1 = pd.read_csv(filename, sep="\t", header=0, index_col=False)
df_len += len(df1)
if not df1.empty:
dfs.append(df1)
print(filename + " has " + str(len(df1)) + " records.")
if remove_empty == True:
if df1.empty:
print("Removing empty file: " + filename)
cmd = ["rm", filename] # look in the download directory
try:
output = subprocess.check_output(
cmd, stderr=subprocess.STDOUT
).decode("UTF-8")
except Exception as e:
output = e.output.decode("UTF-8")
print("ERROR deleting file: " + output)
all_data = pd.concat(dfs, ignore_index=True, sort=False)
print("length of all dfs: " + str(df_len))
nodefile = str("master_" + node + ".tsv")
all_data.to_csv(str(mydir + "/" + nodefile), sep="\t", index=False)
print(
"Master node TSV with "
+ str(len(all_data))
+ " total records written to "
+ nodefile
+ "."
)
return all_data
def get_project_tsvs(
self,
projects=None,
nodes=None,
outdir="project_tsvs",
overwrite=False,
save_empty=False,
remove_nodes=["program", "project", "root", "data_release"],
):
"""Function gets a TSV for every node in a specified project.
Exports TSV files into a directory "project_tsvs/".
Function returns a list of the contents of the directory.
Args:
projects (str/list): The project_id(s) of the project(s) to download. Can be a single project_id or a list of project_ids.
nodes(str/list): The nodes to download from each project. If None, will try to download all nodes in the data model.
            overwrite (boolean): If False, the TSV file is not downloaded if there is an existing file with the same name.
            save_empty(boolean): If True, TSVs with no records, i.e., empty TSV templates, are still saved.
"""
x----------------------------------------------x
| AFSKmodem - Simple, reliable digital radio. |
| https://jmeifert.github.io/afskmodem |
x----------------------------------------------x
"""
import wave
import struct
import pyaudio
from datetime import datetime
import os
from time import sleep
################################################################################ PROGRAM DEFAULTS
# USER-CONFIGURABLE PARAMETERS: These should work fine as-is; tuning them will not break functionality.
#
# Training sequence time in seconds (0.5-1.0, default 0.8)
TRAINING_SEQUENCE_TIME = 0.8
#
# Chunk amplitude at which decoding starts (0-32768, Default 18000 [-5.2 dBfs])
AMPLITUDE_START_THRESHOLD = 18000
#
# Chunk amplitude at which decoding stops (0-32768, Default 14000 [-7.4 dBfs])
AMPLITUDE_END_THRESHOLD = 14000
#
# Amplifier function deadzone (0-32768, Default 128 [-48.2 dBfs])
AMPLIFIER_DEADZONE = 128
#
# Frames per buffer for audio input (1024-4096, Default 2048 [0.043s]) - Smaller blocks increase CPU usage but decrease latency
INPUT_FRAMES_PER_BLOCK = 2048
# SYSTEM PARAMETERS: DO NOT CHANGE THESE!
#
# How many samples per second we are recording (DO NOT CHANGE, sound card resamples if needed)
SAMPLE_RATE = 48000
#
# Wav format (DO NOT CHANGE, sound card handles format conversion if needed)
FORMAT = pyaudio.paInt16
#
# Input+output channels (DO NOT CHANGE, sound card handles stereo conversion if needed)
CHANNELS = 1
#
# Frames to scan for clock recovery (Should scan at least two full blocks in,
# but no more than a portion of the length of the training sequence.)
CLOCK_SCAN_WIDTH = 2 * INPUT_FRAMES_PER_BLOCK
#
# Directory where ideal waves are stored
IDEAL_WAVES_DIR = "data/ideal_waves/"
################################################################################ LOGGING
def get_date_and_time(): # Long date and time for logging
now = datetime.now()
return now.strftime('%Y-%m-%d %H:%M:%S')
# Logging level (0: INFO, 1: WARN (recommended), 2: ERROR, 3: NONE)
LOG_LEVEL = 0
#
# Should the log output to the console?
LOG_TO_CONSOLE = True
#
# Should the log output to a log file?
LOG_TO_FILE = False
#
# Where to generate logfile if need be
LOG_PATH = "afskmodem.log"
#
# How the log identifies which module is logging.
LOG_PREFIX = "(AFSKmodem)"
# Initialize log file if needed
if(LOG_TO_FILE):
try:
os.remove(LOG_PATH)
except:
pass
with open(LOG_PATH, "w") as f:
f.write(get_date_and_time() + " [INFO] " + LOG_PREFIX + " Logging initialized.\n")
def log(level: int, data: str):
if(level >= LOG_LEVEL):
output = get_date_and_time()
if(level == 0):
output += " [INFO] "
elif(level == 1):
output += " [WARN] "
else:
output += " [ERR!] "
output += LOG_PREFIX + " "
output += data
if(LOG_TO_FILE):
with open(LOG_PATH, "a") as f:
f.write(output + "\n")
if(LOG_TO_CONSOLE):
print(output)
################################################################################ DIGITAL MODULATION TYPES
class DigitalModulationTypes:
def afsk300() -> str: # Audio Frequency-Shift Keying (300 baud)
return "afsk300"
def afsk600() -> str: # Audio Frequency-Shift Keying (600 baud)
return "afsk600"
def afsk1200() -> str: # Audio Frequency-Shift Keying (1200 baud)
return "afsk1200"
def afsk2400() -> str: # Audio Frequency-Shift Keying (2400 baud)
return "afsk2400"
def afsk6000() -> str: # Audio Frequency-Shift Keying (6000 baud)
return "afsk6000"
def default() -> str: # Default (AFSK1200)
return "afsk1200"
# Unit time in samples
def get_unit_time(digital_modulation_type: str) -> int:
if(digital_modulation_type == "afsk300"):
return int(SAMPLE_RATE / 300)
elif(digital_modulation_type == "afsk600"):
return int(SAMPLE_RATE / 600)
elif(digital_modulation_type == "afsk1200"):
return int(SAMPLE_RATE / 1200)
elif(digital_modulation_type == "afsk2400"):
return int(SAMPLE_RATE / 2400)
elif(digital_modulation_type == "afsk6000"):
return int(SAMPLE_RATE / 6000)
else: # default
return int(SAMPLE_RATE / 1200)
# Training sequence oscillations for specified time
    def get_ts_oscillations(sequence_time: float, digital_modulation_type: str) -> int:
if(digital_modulation_type == "afsk300"):
return int(300 * sequence_time / 2)
elif(digital_modulation_type == "afsk600"):
return int(600 * sequence_time / 2)
elif(digital_modulation_type == "afsk1200"):
return int(1200 * sequence_time / 2)
elif(digital_modulation_type == "afsk2400"):
return int(2400 * sequence_time / 2)
elif(digital_modulation_type == "afsk6000"):
return int(6000 * sequence_time / 2)
else: # default
return int(1200 * sequence_time / 2)
# Get the frequency of the space tone for a given type
def get_space_tone(digital_modulation_type: str) -> int:
if(digital_modulation_type == "afsk300"):
return 300
elif(digital_modulation_type == "afsk600"):
return 600
elif(digital_modulation_type == "afsk1200"):
return 1200
elif(digital_modulation_type == "afsk2400"):
return 2400
elif(digital_modulation_type == "afsk6000"):
return 6000
else: # default
return 1200
# Get the frequency of the mark tone for a given type
def get_mark_tone(digital_modulation_type: str) -> int:
if(digital_modulation_type == "afsk300"):
return 600
elif(digital_modulation_type == "afsk600"):
return 1200
elif(digital_modulation_type == "afsk1200"):
return 2400
elif(digital_modulation_type == "afsk2400"):
return 4800
elif(digital_modulation_type == "afsk6000"):
return 12000
else: # default
return 2400
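    # Usage sketch: timing and tone lookups for the default AFSK1200 scheme
    #   mod = DigitalModulationTypes.default()               # "afsk1200"
    #   DigitalModulationTypes.get_unit_time(mod)            # 40 samples per symbol at 48 kHz
    #   DigitalModulationTypes.get_space_tone(mod)           # 1200 Hz
    #   DigitalModulationTypes.get_mark_tone(mod)            # 2400 Hz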
################################################################################ IDEAL WAVES
class IdealWaves: # Ideal waves for TX and RX
def __init__(self, digital_modulation_type = DigitalModulationTypes.default()):
self.digital_modulation_type = digital_modulation_type
# Load wav data to int array
def __load_wav_data(self, filename: str) -> list:
with wave.open(filename, "r") as f:
nFrames = f.getnframes()
expFrames = []
for i in range(0, nFrames):
sFrame = f.readframes(1)
expFrames.append(struct.unpack("<h", sFrame)[0])
return expFrames
# Load wav data to bytes
def __load_raw_wav_data(self, filename: str) -> bytes:
with wave.open(filename, "r") as f:
nFrames = f.getnframes()
return f.readframes(nFrames)
# Silence (20ms) to pad output with for TX
def get_tx_silence(self) -> bytes:
return self.__load_raw_wav_data(IDEAL_WAVES_DIR + "_.wav")
# Space tone as bytes for TX
def get_tx_space(self) -> bytes:
return self.__load_raw_wav_data(IDEAL_WAVES_DIR + self.digital_modulation_type + "/0.wav")
# Mark tone as bytes for TX
def get_tx_mark(self) -> bytes:
return self.__load_raw_wav_data(IDEAL_WAVES_DIR + self.digital_modulation_type + "/1.wav")
# Space tone as int array for RX
def get_rx_space(self) -> list:
return self.__load_wav_data(IDEAL_WAVES_DIR + self.digital_modulation_type + "/0.wav")
# Mark tone as int array for RX
def get_rx_mark(self) -> list:
return self.__load_wav_data(IDEAL_WAVES_DIR + self.digital_modulation_type + "/1.wav")
# Ideal training sequence oscillation for RX clock recovery
def get_rx_training(self) -> list:
return self.get_rx_mark() + self.get_rx_space()
################################################################################ HAMMING ECC
class Hamming:
# Each instance of Hamming keeps track of the errors it corrects.
# An instance of Hamming is created for each DigitalTransmitter or DigitalReceiver instance.
def __init__(self):
self.r = 4
self.error_count = 0
def reset_error_count(self): # Reset error count to 0
self.error_count = 0
def get_error_count(self) -> int: # Get error count
return self.error_count
def __increment_error_count(self): # Increment error count
self.error_count += 1
# Pad the positions of parity bits with 0
def __pad_parity_bits(self, data: str) -> str:
j = 0
k = 1
m = len(data)
p = ''
for i in range(1, m + self.r+1):
if(i == 2**j):
p += '0'
j += 1
else:
p += data[-1 * k]
k += 1
return p[::-1]
# Set the parity bits to their correct values
def __set_parity_bits(self, data: str) -> str:
n = len(data)
for i in range(self.r):
p = 0
for j in range(1, n + 1):
if(j & (2**i) == (2**i)):
p = p ^ int(data[-1 * j])
data = data[:n-(2**i)] + str(p) + data[n-(2**i)+1:]
return data
# Find an error (if it exists)
def __get_error_index(self, data: str) -> int:
n = len(data)
p = 0
for i in range(self.r):
val = 0
for j in range(1, n + 1):
if(j & (2**i) == (2**i)):
val = val ^ int(data[-1 * j])
p += val*(10**i)
return n - int(str(p), 2)
# Trim the parity bits off a corrected message to get the contained data
def __trim_parity_bits(self, data: str) -> str:
data = data[::-1]
p = ""
j = 0
for i in range(len(data)):
if(i + 1 != 2 ** j):
p += data[i]
else:
j += 1
return p[::-1]
# Correct a single error in a section of data.
def __correct_errors(self, data: str) -> str:
error_pos = self.__get_error_index(data)
if(error_pos == len(data)):
return data
else:
self.__increment_error_count()
data_list = list(data)
if(data_list[error_pos] == "0"):
data_list[error_pos] = "1"
else:
data_list[error_pos] = "0"
data = "".join(data_list)
return data
# Single function handling generating Hamming code. Returns a tuple with the
# result bit string and the number of redundant bits.
def encode(self, data: str) -> str:
padded_data = self.__pad_parity_bits(data)
parity_data = self.__set_parity_bits(padded_data)
return(parity_data)
# Single function handling correcting and getting useful data from Hamming
# code. Returns the data payload.
def decode(self, data: str) -> str:
corrected_data = self.__correct_errors(data)
output_data = self.__trim_parity_bits(corrected_data)
return(output_data)
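    # Usage sketch: encode pads and fills 4 parity bits; decode corrects up to one flipped
    # bit per codeword and strips the parity bits again.
    #   h = Hamming()
    #   code = h.encode("1011")      # 8-bit codeword (4 data bits + 4 parity bits)
    #   data = h.decode(code)        # -> "1011"; corrected bit flips increment get_error_count()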
################################################################################ RX TOOLS
class DigitalReceiver:
def __init__(self,
digital_modulation_type = DigitalModulationTypes.default(),
amp_start_threshold = AMPLITUDE_START_THRESHOLD,
amp_end_threshold = AMPLITUDE_END_THRESHOLD,
amp_deadzone = AMPLIFIER_DEADZONE):
self.digital_modulation_type = digital_modulation_type
self.amp_start_threshold = amp_start_threshold
self.amp_end_threshold = amp_end_threshold
self.amp_deadzone = amp_deadzone
self.unit_time = DigitalModulationTypes.get_unit_time(self.digital_modulation_type)
self.space_tone = DigitalModulationTypes.get_space_tone(self.digital_modulation_type)
self.mark_tone = DigitalModulationTypes.get_mark_tone(self.digital_modulation_type)
ideal_waves = IdealWaves(digital_modulation_type = self.digital_modulation_type)
self.rx_space = ideal_waves.get_rx_space()
self.rx_mark = ideal_waves.get_rx_mark()
self.rx_training = ideal_waves.get_rx_training()
self.ecc = Hamming()
# Load raw wav data from file
def __load_raw_wav_data(self, filename: str) -> bytes:
        with wave.open(filename, "r") as f:
low to high.
Returns
-------
[quo, rem] : ndarrays
Of coefficient series representing the quotient and remainder.
See Also
--------
polyadd, polysub, polymul, polypow
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> P.polydiv(c1,c2)
(array([ 3.]), array([-8., -4.]))
>>> P.polydiv(c2,c1)
(array([ 0.33333333]), array([ 2.66666667, 1.33333333]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0:
raise ZeroDivisionError()
len1 = len(c1)
len2 = len(c2)
if len2 == 1:
return c1/c2[-1], c1[:1]*0
elif len1 < len2:
return c1[:1]*0, c1
else:
dlen = len1 - len2
scl = c2[-1]
c2 = c2[:-1]/scl
i = dlen
j = len1 - 1
while i >= 0:
c1[i:j] -= c2*c1[j]
i -= 1
j -= 1
return c1[j+1:]/scl, pu.trimseq(c1[:j+1])
def polypow(c, pow, maxpower=None):
"""Raise a polynomial to a power.
Returns the polynomial `c` raised to the power `pow`. The argument
`c` is a sequence of coefficients ordered from low to high. i.e.,
[1,2,3] is the series ``1 + 2*x + 3*x**2.``
Parameters
----------
c : array_like
        1-D array of series coefficients ordered from low to
high degree.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Power series of power.
See Also
--------
polyadd, polysub, polymul, polydiv
Examples
--------
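    >>> from numpy.polynomial import polynomial as P
    >>> P.polypow((1, 2, 3), 2)  # (1 + 2*x + 3*x**2)**2
    array([  1.,   4.,  10.,  12.,   9.])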
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
# This can be made more efficient by using powers of two
# in the usual way.
prd = c
for i in range(2, power + 1):
prd = np.convolve(prd, c)
return prd
def polyder(c, m=1, scl=1, axis=0):
"""
Differentiate a polynomial.
Returns the polynomial coefficients `c` differentiated `m` times along
`axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The
argument `c` is an array of coefficients from low to high degree along
each axis, e.g., [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``
while [[1,2],[1,2]] represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is
``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of polynomial coefficients. If c is multidimensional the
different axis correspond to different variables with the degree
in each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change
of variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Polynomial coefficients of the derivative.
See Also
--------
polyint
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> c = (1,2,3,4) # 1 + 2x + 3x**2 + 4x**3
>>> P.polyder(c) # (d/dx)(c) = 2 + 6x + 12x**2
array([ 2., 6., 12.])
>>> P.polyder(c,3) # (d**3/dx**3)(c) = 24
array([ 24.])
>>> P.polyder(c,scl=-1) # (d/d(-x))(c) = -2 - 6x - 12x**2
array([ -2., -6., -12.])
>>> P.polyder(c,2,-1) # (d**2/d(-x)**2)(c) = 6 + 24x
array([ 6., 24.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
# astype fails with NA
c = c + 0.0
cdt = c.dtype
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
n = len(c)
if cnt >= n:
c = c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=cdt)
for j in range(n, 0, -1):
der[j - 1] = j*c[j]
c = der
c = np.rollaxis(c, 0, iaxis + 1)
return c
def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a polynomial.
Returns the polynomial coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients, from low to high degree along each axis, e.g., [1,2,3]
represents the polynomial ``1 + 2*x + 3*x**2`` while [[1,2],[1,2]]
represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c : array_like
1-D array of polynomial coefficients, ordered from low to high.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at zero
is the first value in the list, the value of the second integral
at zero is the second value, etc. If ``k == []`` (the default),
all constants are set to zero. If ``m == 1``, a single scalar can
be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Coefficient array of the integral.
Raises
------
ValueError
If ``m < 1``, ``len(k) > m``.
See Also
--------
polyder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`. Why
is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
    :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> c = (1,2,3)
>>> P.polyint(c) # should return array([0, 1, 1, 1])
array([ 0., 1., 1., 1.])
>>> P.polyint(c,3) # should return array([0, 0, 0, 1/6, 1/12, 1/20])
array([ 0. , 0. , 0. , 0.16666667, 0.08333333,
0.05 ])
>>> P.polyint(c,k=3) # should return array([3, 1, 1, 1])
array([ 3., 1., 1., 1.])
>>> P.polyint(c,lbnd=-2) # should return array([6, 1, 1, 1])
array([ 6., 1., 1., 1.])
>>> P.polyint(c,scl=-2) # should return array([0, -2, -2, -2])
array([ 0., -2., -2., -2.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
# astype doesn't preserve mask attribute.
c = c + 0.0
cdt = c.dtype
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
        return c
# Source: mthjwu/ask
################################################################################
# circular binary segmentation implementation
# (ugly max t-statistic searching method,
# might only work for high focal amplification)
# currently input should be absolute copy number
# todo:
# include 1 bin spikes
################################################################################
#------------------------------------------------------------------------------#
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from itertools import groupby, combinations
from operator import itemgetter
#------------------------------------------------------------------------------#
from grange import GRange
import misc
#------------------------------------------------------------------------------#
# select amplified segments on cbs results
#------------------------------------------------------------------------------#
def cbs_amplicon(seg_df, min_cn = 5):
"""get amplicon from cbs segmentation results
"""
return seg_df[seg_df.CN >= min_cn]
#------------------------------------------------------------------------------#
# cbs segmentation
#------------------------------------------------------------------------------#
def cbs(bin_count, binsize = 10000, nperm = 1000, p = 0.01):
""" get copy number segmentations from bin count data
output a dataframe of amplicons
Parameters
----------
    bin_count : DataFrame of read counts per genomic bin (with Chrom, Start and CN columns)
    binsize : target genomic bin size used to calculate read counts
    nperm : number of permutations for the split significance test
    p : p-value cutoff for accepting a split
output
----------
segmentation results in dataframe
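    example
    ----------
    >>> # assuming bin_count has Chrom, Start and CN (absolute copy number) columns
    >>> seg_df = cbs(bin_count, binsize=10000, nperm=1000, p=0.01)
    >>> amp_df = cbs_amplicon(seg_df, min_cn=5)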
"""
df = bin_count.copy()
# calculate log2 ratio if use cbs
if ('Log2Ratio' not in df.columns):
df['Log2Ratio'] = np.log2(np.array(df.CN / 2))
# call copy number segmentations in each chromosome
cnsegs = []
for chr in misc.unique(df['Chrom']):
# print(chr)
dfsub = df[df['Chrom'] == chr]
# perform cbs on each chromosome
x = np.array(dfsub['Log2Ratio'])
seg = cbs_segment(x, nperm = nperm, p = p)
seg = cbs_recheck(x, seg, nperm = nperm, p = p)
# cbs_plot_segment(x, seg, (3000, 4000)) # plot function
seg = [list(dfsub.iloc[i[0],0:2]) +
list(dfsub.iloc[i[1]-1,1:2]+binsize) +
[i[2]] for i in seg]
cnsegs.append(pd.DataFrame(seg,
columns=['Chrom', 'Start', 'End', 'Log2Ratio']))
seg_df = pd.concat(cnsegs)
seg_df['CN'] = 2**seg_df.Log2Ratio*2
return seg_df[['Chrom', 'Start', 'End', 'CN', 'Log2Ratio']]
#------------------------------------------------------------------------------#
# filter copy number amplified segments by blacklist intervals
#------------------------------------------------------------------------------#
def amplicon_filter_by_blacklist(cn_amp, blacklistfile, f_sort=True):
"""
filter amplicon by blacklist intervals
for left and right clip, respectively
(use pandas dataframe to process and store)
"""
def filter_blacklist(df, blacklistfile):
gr1 = GRange(df, 'dataframe_hasend')
gr2 = GRange(blacklistfile, 'bedfile')
# extend 100 bp on both end of the blacklist
gr = gr1.intersect(gr2, a_extend = 0, b_extend = 100, invert = True)
return pd.DataFrame(\
[[row[0]] + [row[1].start, row[1].stop] + list(row[2]) for row in gr.gr]\
, columns=df.columns)
df = filter_blacklist(cn_amp, blacklistfile)
if (f_sort):
df = df.sort_values(['Chrom', 'Start', 'End'])
return df
#------------------------------------------------------------------------------#
# simple calling of amplicons
#------------------------------------------------------------------------------#
def simple_amplicon_segment(x, min_cn = 5):
"""
Simple way to identify amplified segments
score of genomic bins higher than a certain score
let's say bins with more than 5 copies
"""
xx = np.where(x > min_cn)[0].tolist()
segments =[]
# get the combine the continuous numbers in a list
for k,g in groupby(enumerate(xx),lambda x:x[0]-x[1]):
group = (map(itemgetter(1),g))
group = list(map(int,group))
segments.append((group[0],group[-1]+1))
# get the segmentation mean table
seg = []
for j in segments:
seg_start = j[0]
seg_end = j[1]
seg_mean = np.mean(x[seg_start:seg_end])
seg.append([seg_start, seg_end, seg_mean])
return seg
#------------------------------------------------------------------------------#
# segmentation function
#------------------------------------------------------------------------------#
def cbs_segment(x, nperm = 1000, p = 0.01, k = 1000):
"""
segmentation function
k : split to k data points per block
"""
start = 0
end = len(x)
segments = []
# get segments
for chunk in make_window(end):
segments = cbs_segment_helper(
x, chunk[0], chunk[1], segments, nperm = nperm, p = p)
# get the segmentation mean table
seg = []
for j in segments:
seg_start = j[0]
seg_end = j[1]
seg_mean = np.nanmean(x[seg_start:seg_end])
seg.append([seg_start, seg_end, seg_mean])
return seg
#------------------------------------------------------------------------------#
def cbs_segment_helper(x, start, end, segments, nperm = 1000, p = 0.01):
"""
Recursive segmentation helper function
"""
# print(start, end)
# if (end - start <= 3): # no split need for <= 3 length interval
# return segments
# print(start, end, segments)
ij = cbs_determine_ij(x, start, end, nperm = nperm, p = p)
# print(start, end, ij, segments)
# if (ij[1] - ij[0] == 1 and end - start > 3):
# ij[2] = True
if (ij[2] == False):
segments.append((start, end))
else:
if (ij[0] - start >= 2):
cbs_segment_helper(x, start, ij[0]+1, segments, nperm=nperm, p=p)
elif (ij[0] - start < 2): # left edge spike
segments.append((start, ij[0]+1))
if (ij[1]-ij[0] >= 3):
cbs_segment_helper(x, ij[0]+1, ij[1]+1, segments, nperm=nperm, p=p)
elif (ij[1]-ij[0] < 3): # middle spike
segments.append((ij[0]+1, ij[1]+1))
if (end - ij[1] >= 4):
# print(x, end, ij, segments)
cbs_segment_helper(x, ij[1]+1, end, segments, nperm=nperm, p=p)
elif (end - ij[1] < 4 and end - ij[1] > 1): # right edge spike
segments.append((ij[1]+1, end))
return segments
#------------------------------------------------------------------------------#
def make_window(n, k = 1000, l = 100):
"""bin n numbers into windows,
k points per window with l overlap
"""
op = [[i-l, i+k+l] for i in range(0, n, k)]
op[0][0] = 0
op[-1][1] = n
if (len(op) > 1 and op[-1][1]-op[-1][0] < k/2 + l):
op.pop()
op[-1][1] = n
return op
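# e.g. make_window(2500) -> [[0, 1100], [900, 2100], [1900, 2500]] with the default
# k = 1000 points per window and l = 100 overlap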
#------------------------------------------------------------------------------#
# recheck function
#------------------------------------------------------------------------------#
def cbs_recheck(x, segments, nperm = 1000, p = 0.01, tmin = 1.5):
"""
    recheck whether every combination of three consecutive split points is significant
    tmin : minimal t statistic required to run the permutation test; a t statistic that is
        too low means there is no copy number difference between the segments
"""
sp_cand = [i[0] for i in segments]+[len(x)] # split points
sp_cand = misc.unique(sp_cand)
sp_cand.sort()
# print(sp_cand)
sp = [0]
while (len(sp_cand) >= 3):
start = sp_cand[0]
mid = sp_cand[1]
end = sp_cand[2]
i = mid - start - 1
xs = x[start:end]
S = np.cumsum(xs) # cumulative sum of sliced x
tmax = cbs_tstats_ij(S, i) # t-statistic
if (tmax >= tmin):
tf = cbs_permute(xs, tmax, i, nperm = nperm, p = p) # permutation
else:
tf = False
# print(start, mid, end, tmax, tf)
if (tf == True):
sp.append(mid)
# sp.append(end)
sp_cand.remove(start)
else:
# if mid in sp: sp.remove(mid)
sp_cand.remove(mid)
if (sp[-1] != sp_cand[-1]):
sp.append(sp_cand[-1]) # add chrom end
sp = misc.unique(sp) # make unique breaks
seg = []
for j in range(1, len(sp)):
seg_start = sp[j - 1]
seg_end = sp[j]
seg_mean = np.mean(x[seg_start:seg_end])
seg.append([seg_start, seg_end, seg_mean])
return seg
#------------------------------------------------------------------------------#
# sub functions
#------------------------------------------------------------------------------#
def cbs_tstats_ij(S, i, j = None):
"""
calculate the t-statistic for i, j breaks or one i break
S : np.array of cumulative sum of x
i : np.array of segment 1 end
j : np.array of segment 2 end (optional)
"""
if (j is not None):
Si = S[j] - S[i]
Sj = S[-1] - Si
k = j - i
n = len(S)
Tn = (Si/k - Sj/(n-k))
Td = (1/k+1/(n-k))**(1/2) # assume equal variance, cancel np.std(x, ddof=1)
T = abs(Tn/Td)
else:
Si = S[i]
Sj = S[-1] - Si
k = i + 1
n = len(S)
Tn = (Si/k - Sj/(n-k))
Td = (1/k+1/(n-k))**(1/2) # assume equal variance, cancel np.std(x, ddof=1)
T = abs(Tn/Td)
return T # return t-statistic and fold change
#------------------------------------------------------------------------------#
def cbs_permute(x, tmax, i, j = None, nperm = 1000, p = 0.01):
"""
permutation test for t-statistic
x: copy number data per genomic bin
i: segment 1 end
j: segment 2 end
tmax: max t-statistic between two segments
nperm: # of permutations
p: p-value cutoff
"""
h0_count = 0
alpha = nperm * p
xp = x.copy()
for p in range(nperm):
seed = p
np.random.seed(seed)
np.random.shuffle(xp)
S = np.cumsum(xp)
if (j is not None): # two split points
h0 = cbs_tstats_ij(S, i, j)
else: # one split point
h0 = cbs_tstats_ij(S, i)
if h0 >= tmax:
h0_count += 1
if h0_count > alpha:
return False
return True
#------------------------------------------------------------------------------#
def cbs_determine_ij(x, start, end, nperm = 1000, p = 0.01, tmin = 1.5):
"""
Determine i and j at max t-statistic
x: copy number data per genomic bin
start: start index in x
end: end index in x
tmin : minimal t stats to run permutation, too low t stats means
no difference of copy number between segments
slice x by start and end to perform the analysis
and output the i, j split achieve max t-statistic
"""
if (end - start <= 3):
return [start, end, False]
xs = x[start:end]
S = np.cumsum(xs) # cumulative sum of sliced x
## refine the i j choice by t-statistic
ii = []
jj = []
xs_len = len(xs)
for k1 in range(1, xs_len - 2):
for k2 in range(k1 + 2, xs_len):
ii.append(k1)
jj.append(k2)
ii = np.array(ii)
jj = np.array(jj)
# comb = list(combinations(range(len(xs)), 2))
# Source file: airport/kube/api.py
from datetime import datetime
from decimal import Decimal
from enum import Enum
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
try:
from yaml import CLoader as YamlLoader
except ImportError:
from yaml import Loader as YamlLoader # type: ignore
import yaml
from kubernetes.utils.quantity import parse_quantity
from pydantic import BaseModel
from pydantic.fields import Field
DefaultDatetime = datetime.fromtimestamp(0)
class KubeModel(BaseModel):
def yaml(self, **kwargs,) -> str:
return yaml.dump(self.dict(**kwargs))
@classmethod
def parse_yaml(cls, data: Union[str, bytes], *, loader=YamlLoader) -> "KubeModel":
data = yaml.load(data, Loader=loader)
return cls.parse_obj(data)
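    # Usage sketch (illustrative names): any KubeModel subclass round-trips through YAML,
    # e.g. with ObjectMeta defined further below:
    #   meta = ObjectMeta(name="demo", namespace="default")
    #   restored = ObjectMeta.parse_yaml(meta.yaml())   # restored.name == "demo"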
class KubeEnum(str, Enum):
...
class ResourceQuantity(Decimal):
def __new__(cls, quantity: Union[str, float, Decimal] = 0) -> "ResourceQuantity":
quantity = parse_quantity(quantity)
return super().__new__(cls, quantity) # noqa
def __repr__(self):
return f"{self.__class__.__name__}('{self}')"
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, v: Union[str, float]):
quantity = parse_quantity(v)
return cls(quantity) # noqa
def __add__(self, value) -> "ResourceQuantity":
return self.__class__(super().__add__(value)) # noqa
def __sub__(self, value) -> "ResourceQuantity":
return self.__class__(super().__sub__(value)) # noqa
def __mul__(self, value) -> "ResourceQuantity":
return self.__class__(super().__mul__(value)) # noqa
def __truediv__(self, value) -> "ResourceQuantity":
return self.__class__(super().__truediv__(value)) # noqa
def __floordiv__(self, value) -> "ResourceQuantity":
return self.__class__(super().__floordiv__(value)) # noqa
def __mod__(self, value) -> "ResourceQuantity":
return self.__class__(super().__mod__(value)) # noqa
def __pow__(self, value, mod=None, /) -> "ResourceQuantity":
return self.__class__(super().__pow__(value, mod)) # noqa
def __neg__(self) -> "ResourceQuantity":
return self.__class__(super().__neg__()) # noqa
def __abs__(self) -> "ResourceQuantity":
return self.__class__(super().__abs__()) # noqa
def __divmod__(self, value) -> Tuple["ResourceQuantity", "ResourceQuantity"]:
quotient, remainder = super().__divmod__(value)
return self.__class__(quotient), self.__class__(remainder) # noqa
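    # Usage sketch: quantities accept Kubernetes suffix notation and behave like Decimal,
    # e.g. ResourceQuantity("500Mi") + ResourceQuantity("1Gi") == ResourceQuantity("1598029824")
    # (524288000 + 1073741824 bytes); arithmetic operators return ResourceQuantity instances.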
class TypeMeta(KubeModel):
kind: str = ""
apiVersion: str = ""
class OwnerReference(KubeModel):
apiVersion: str = ""
kind: str = ""
name: str = ""
uid: str = ""
controller: bool = False
blockOwnerDeletion: bool = False
class ListMeta(KubeModel):
selfLink: str = ""
resourceVersion: str = ""
continue_value: str = Field("", alias="continue")
remainingItemCount: Optional[int]
class LocalObjectReference(KubeModel):
name: str = ""
class ObjectMeta(KubeModel):
name: str = ""
generateName: str = ""
namespace: str = ""
selfLink: str = ""
uid: str = ""
resourceVersion: str = ""
generation: str = ""
creationTimestamp: datetime = DefaultDatetime
deletionTimestamp: Optional[datetime]
deletionGracePeriodSeconds: Optional[int]
labels: Dict[str, str] = {}
annotations: Dict[str, str] = {}
ownerReferences: List[OwnerReference] = []
finalizers: List[str] = []
clusterName: str = ""
# managedFields not needed
class LabelSelectorOperator(KubeEnum):
In = "In"
NotIn = "NotIn"
Exists = "Exists"
DoesNotExist = "DoesNotExist"
class LabelSelectorRequirement(KubeModel):
key: str = ""
operator: LabelSelectorOperator
values: List[str] = []
class LabelSelector(KubeModel):
matchLabels: Dict[str, str] = {}
matchExpressions: List[LabelSelectorRequirement] = []
class ResourceName(KubeEnum):
CPU = "cpu"
Memory = "memory"
Storage = "storage"
EphemeralStorage = "ephemeral-storage"
ResourceList = Dict[Union[ResourceName, str], ResourceQuantity]
class ResourceRequirements(KubeModel):
limits: ResourceList = {}
requests: ResourceList = {}
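    # Sketch (illustrative values): requests/limits may be given as quantity strings and are
    # coerced to ResourceQuantity by pydantic, e.g.
    #   ResourceRequirements(requests={"cpu": "500m", "memory": "1Gi"}, limits={"cpu": "1"})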
class TypedLocalObjectReference(KubeModel):
kind: str = ""
name: str = ""
apiGroup: Optional[str]
class PersistentVolumeAccessMode(KubeEnum):
ReadWriteOnce = "ReadWriteOnce"
ReadOnlyMany = "ReadOnlyMany"
ReadWriteMany = "ReadWriteMany"
class PersistentVolumeMode(KubeEnum):
Block = "Block"
Filesystem = "Filesystem"
class PersistentVolumeClaimSpec(KubeModel):
accessModes: List[PersistentVolumeAccessMode] = []
selector: Optional[LabelSelector]
resources: ResourceRequirements = ResourceRequirements()
volumeName: str = ""
storageClassName: Optional[str]
volumeMode: Optional[PersistentVolumeMode]
dataSource: Optional[TypedLocalObjectReference]
class PersistentVolumeClaimPhase(KubeEnum):
Pending = "Pending"
Bound = "Bound"
Lost = "Lost"
class PersistentVolumeClaimCondition(KubeEnum):
Resizing = "Resizing"
FileSystemResizePending = "FileSystemResizePending"
class PersistentVolumeClaimStatus(KubeModel):
phase: PersistentVolumeClaimPhase
accessModes: List[PersistentVolumeAccessMode] = []
capacity: ResourceList = {}
conditions: List[PersistentVolumeClaimCondition] = []
class KeyToPath(KubeModel):
key: str = ""
path: str = ""
mode: Optional[int] = Field(None, ge=0, le=0o777)
class HostPathType(KubeEnum):
Unset = ""
DirectoryOrCreate = "DirectoryOrCreate"
Directory = "Directory"
FileOrCreate = "FileOrCreate"
File = "File"
Socket = "Socket"
CharDevice = "CharDevice"
BlockDevice = "BlockDevice"
class HostPathVolumeSource(KubeModel):
path: str = ""
type: HostPathType = HostPathType.Unset
class StorageMedium(KubeEnum):
Default = ""
Memory = "Memory"
HugePages = "HugePages"
class EmptyDirVolumeSource(KubeModel):
medium: StorageMedium = StorageMedium.Default
sizeLimit: Optional[ResourceQuantity]
class SecretVolumeSource(KubeModel):
secretName: str = ""
items: List[KeyToPath] = []
defaultMode: int = 0o0644
optional: Optional[bool]
class PersistentVolumeClaimVolumeSource(KubeModel):
claimName: str = ""
readOnly: bool = False
class ConfigMapVolumeSource(LocalObjectReference):
items: List[KeyToPath] = []
defaultMode: int = 0o0644
optional: Optional[bool]
class CSIVolumeSource(KubeModel):
driver: str = ""
readOnly: bool = False
fsType: Optional[str]
volumeAttributes: Dict[str, str] = {}
nodePublishSecretRef: Optional[LocalObjectReference]
class VolumeSource(KubeModel):
hostPath: Optional[HostPathVolumeSource]
emptyDIr: Optional[EmptyDirVolumeSource]
secret: Optional[SecretVolumeSource]
persistentVolumeClaim: Optional[PersistentVolumeClaimVolumeSource]
configMap: Optional[ConfigMapVolumeSource]
csi: Optional[CSIVolumeSource]
# Unsupported
# --------------------
# GCEPersistentDisk
# AWSElasticBlockStore
# GitRepo
# NFS
# ISCSI
# Glusterfs
# RBD
# FlexVolume
# Cinder
# CephFS
# Flocker
# DownwardAPI
# FC
# AzureFile
# VsphereVolume
# Quobyte
# AzureDisk
# PhotonPersistentDisk
# Projected
# PortworxVolume
# ScaleIO
class Volume(VolumeSource):
name: str = ""
class Protocol(KubeEnum):
TCP = "TCP"
UDP = "UDP"
SCTP = "SCTP"
class ContainerPort(KubeModel):
name: str = ""
hostPort: Optional[int] = Field(None, gt=0, lt=65536)
containerPort: Optional[int] = Field(None, gt=0, lt=65536)
protocol: Protocol = Protocol.TCP
hostIP: str = ""
class ConfigMapEnvSource(LocalObjectReference):
optional: Optional[bool]
class SecretEnvSource(LocalObjectReference):
optional: Optional[bool]
class EnvFromSource(KubeModel):
prefix: str = ""
configMapRef: Optional[ConfigMapEnvSource]
secretRef: Optional[SecretEnvSource]
class ObjectFieldSelector(KubeModel):
apiVersion: str = ""
fieldPath: str = ""
class ResourceFieldSelector(KubeModel):
containerName: str = ""
resource: str = ""
divisor: ResourceQuantity = ResourceQuantity()
class ConfigMapKeySelector(LocalObjectReference):
key: str = ""
optional: Optional[bool]
class SecretKeySelector(LocalObjectReference):
key: str = ""
optional: Optional[bool]
class EnvVarSource(KubeModel):
fieldRef: Optional[ObjectFieldSelector]
resourceFieldRef: Optional[ResourceFieldSelector]
configMapKeyRef: Optional[ConfigMapKeySelector]
secretKeyRef: Optional[SecretKeySelector]
class EnvVar(KubeModel):
name: str = ""
value: str = ""
valueFrom: Optional[EnvVarSource]
class MountPropagationMode(KubeEnum):
NoneMode = "None"
HostToContainer = "HostToContainer"
Bidirectional = "Bidirectional"
class VolumeMount(KubeModel):
name: str = ""
readOnly: bool = False
mountPath: str = ""
subPath: str = ""
mountPropagation: MountPropagationMode = MountPropagationMode.NoneMode
subPathExpr: str = ""
class VolumeDevice(KubeModel):
name: str = ""
devicePath: str = ""
class ExecAction(KubeModel):
command: List[str] = []
class HttpHeader(KubeModel):
name: str = ""
value: str = ""
class URIScheme(KubeEnum):
HTTP = "HTTP"
HTTPS = "HTTPS"
class HTTPGetAction(KubeModel):
path: str = ""
port: Union[int, str] = ""
host: str = ""
    uriSchema: URIScheme = Field(URIScheme.HTTP, alias="scheme")  # Kubernetes API field name is "scheme"
httpHeaders: List[HttpHeader] = []
class TCPSocketAction(KubeModel):
port: Union[str, int] = ""
host: str = ""
class Handler(KubeModel):
exec: Optional[ExecAction]
httpGet: Optional[HTTPGetAction]
tcpSocket: Optional[TCPSocketAction]
class Probe(Handler):
initialDelaySeconds: int = 0
timeoutSeconds: int = 1
periodSeconds: int = 10
successThreshold: int = 1
failureThreshold: int = 3
Capability = str
class Capabilities(KubeModel):
add: List[Capability] = []
drop: List[Capability] = []
class SELinuxOptions(KubeModel):
user: str = ""
role: str = ""
type: str = ""
level: str = ""
class WindowsSecurityContextOptions(KubeModel):
gmsaCredentialSpecName: Optional[str]
gmsaCredentialSpec: Optional[str]
runAsUserName: Optional[str]
class ProcMountType(KubeEnum):
Default = "Default"
Unmasked = "Unmasked"
class SecurityContext(KubeModel):
capabilities: Optional[Capabilities]
privileged: bool = False
seLinuxOptions: Optional[SELinuxOptions]
windowsOptions: Optional[WindowsSecurityContextOptions]
runAsUser: Optional[int]
runAsGroup: Optional[int]
runAsNonRoot: Optional[bool]
readOnlyRootFilesystem: bool = False
allowPrivilegeEscalation: Optional[bool]
procMount: ProcMountType = ProcMountType.Default
class Lifecycle(KubeModel):
postStart: Optional[Handler]
preStop: Optional[Handler]
class TerminationMessagePolicy(KubeEnum):
File = "File"
FallbackToLogsOnError = "FallbackToLogsOnError"
class PullPolicy(KubeEnum):
Always = "Always"
Never = "Never"
IfNotPresent = "IfNotPresent"
class EphemeralContainerCommon(KubeModel):
name: str = ""
image: str = ""
command: List[str] = []
args: List[str] = []
workingDir: str = ""
ports: List[ContainerPort] = []
envFrom: List[EnvFromSource] = []
env: List[EnvVar] = []
resources: ResourceRequirements = ResourceRequirements()
volumeMounts: List[VolumeMount] = []
volumeDevices: List[VolumeDevice] = []
livenessProbe: Optional[Probe]
readinessProbe: Optional[Probe]
startupProbe: Optional[Probe]
lifecycle: Optional[Lifecycle]
terminationMessagePath: str = ""
terminationMessagePolicy: TerminationMessagePolicy = TerminationMessagePolicy.File
imagePullPolicy: PullPolicy = PullPolicy.Always
securityContext: Optional[SecurityContext]
stdin: bool = False
stdinOnce: bool = False
tty: bool = False
class EphemeralContainer(EphemeralContainerCommon):
targetContainerName: str = ""
class Container(KubeModel):
name: str = ""
image: str = ""
command: List[str] = []
args: List[str] = []
workingDir: str = ""
ports: List[ContainerPort] = []
envFrom: List[EnvFromSource] = []
env: List[EnvVar] = []
resources: ResourceRequirements = ResourceRequirements()
volumeMounts: List[VolumeMount] = []
volumeDevices: List[VolumeDevice] = []
livenessProbe: Optional[Probe]
readinessProbe: Optional[Probe]
startupProbe: Optional[Probe]
lifecycle: Optional[Lifecycle]
terminationMessagePath: str = "/dev/termination-log"
terminationMessagePolicy: TerminationMessagePolicy = TerminationMessagePolicy.File
imagePullPolicy: PullPolicy = PullPolicy.Always
securityContext: Optional[SecurityContext]
stdin: bool = False
stdinOnce: bool = False
tty: bool = False
class Sysctl(KubeModel):
name: str = ""
value: str = ""
class PodSecurityContext(KubeModel):
seLinuxOptions: Optional[SELinuxOptions]
windowsOptions: Optional[WindowsSecurityContextOptions]
runAsUser: Optional[int]
runAsGroup: Optional[int]
    runAsNonRoot: Optional[bool]
supplementalGroups: List[int] = []
fsGroup: Optional[int]
sysctls: List[Sysctl] = []
class NodeSelectorOperator(KubeEnum):
In = "In"
    NotIn = "NotIn"
Exists = "Exists"
DoesNotExist = "DoesNotExist"
Gt = "Gt"
Lt = "Lt"
class NodeSelectorRequirement(KubeModel):
key: str = ""
operator: NodeSelectorOperator
values: List[str] = []
class NodeSelectorTerm(KubeModel):
matchExpressions: List[NodeSelectorRequirement] = []
matchFields: List[NodeSelectorRequirement] = []
class NodeSelector(KubeModel):
nodeSelectorTerms: List[NodeSelectorTerm] = []
class PreferredSchedulingTerm(KubeModel):
weight: int = Field(None, ge=1, le=100)
preference: NodeSelectorTerm = NodeSelectorTerm()
class NodeAffinity(KubeModel):
requiredDuringSchedulingIgnoredDuringExecution: Optional[NodeSelector]
preferredDuringSchedulingIgnoredDuringExecution: List[PreferredSchedulingTerm] = []
class PodAffinityTerm(KubeModel):
labelSelector: Optional[LabelSelector]
namespaces: List[str] = []
topologyKey: str = ""
class WeightedPodAffinityTerm(KubeModel):
weight: int = Field(None, ge=1, le=100)
podAffinityTerm: PodAffinityTerm = PodAffinityTerm()
class PodAffinity(KubeModel):
requiredDuringSchedulingIgnoredDuringExecution: List[PodAffinityTerm] = []
preferredDuringSchedulingIgnoredDuringExecution: List[WeightedPodAffinityTerm] = []
class PodAntiAffinity(KubeModel):
requiredDuringSchedulingIgnoredDuringExecution: List[PodAffinityTerm] = []
preferredDuringSchedulingIgnoredDuringExecution: List[WeightedPodAffinityTerm] = []
class Affinity(KubeModel):
nodeAffinity: Optional[NodeAffinity]
+ ', '.join([str(m.get(k, '')) for k in self.add_keys])
self.update_statusbar(msg, success=True)
class AddEmail(AddRow):
def __init__(self, parent=None):
super().__init__(parent=parent, window_title='Add Email')
IPF = InputField
self.add_input(field=IPF(text='MineSite', default=gbl.get_minesite()), items=cf.config['MineSite'])
self.add_input(field=IPF(text='Email'))
self.add_input(field=IPF(text='User Group', default=self.mw.u.usergroup), items=db.domain_map.keys())
self.name = 'email'
self.add_keys = ['MineSite', 'Email']
class AddEvent(AddRow):
def __init__(self, parent=None):
super().__init__(parent=parent, window_title='Add Event')
fc_number = None
IPF, add = InputField, self.add_input
is_cummins = parent.u.is_cummins if parent is not None else False
self._save_items = ('unit_qcombobox',)
self.name = 'event'
self.add_keys = ['Unit', 'Title']
layout = self.v_layout
df = db.get_df_unit()
add(field=IPF(text='MineSite', default=self.minesite), items=db.get_list_minesite())
add(field=IPF(text='Unit', enforce=True), items=list(df[df.MineSite == self.minesite].Unit))
add(field=IPF(text='Date', dtype='date', col_db='DateAdded'))
# Add btn to check smr
btn = self.create_button('Get SMR')
btn.clicked.connect(self.get_smr)
self.add_input(field=IPF(text='SMR', dtype='int'), btn=btn)
if not is_cummins:
# Checkboxes
cb_eventfolder = ff.CheckBox('Create Event Folder', checked=True)
# cb_onedrive = ff.CheckBox('Create OneDrive Folder', checked=True)
tsi_checked = True if parent and parent.name == 'TSI' else False
cb_tsi = ff.CheckBox('Create TSI', checked=tsi_checked)
# FC
cb_fc = ff.CheckBox('Link FC')
cb_fc.stateChanged.connect(self.select_fc)
btn_open_fcs = btn = self.create_button('Open FCs')
btn_open_fcs.setToolTip('View all open FCs for current unit.')
btn_open_fcs.clicked.connect(self.show_open_fcs)
box_fc = QHBoxLayout()
box_fc.addWidget(cb_fc)
box_fc.addWidget(btn_open_fcs)
# outstanding FCs
box_fc_out = QHBoxLayout()
label_fc = QLabel('Outstanding FCs: ')
label_fc_m = QLabel('M: ')
label_fc_other = QLabel('Other: ')
box_fc_out.addWidget(label_fc)
box_fc_out.addStretch(1)
box_fc_out.addWidget(label_fc_m)
box_fc_out.addWidget(label_fc_other)
# cb_eventfolder.stateChanged.connect(self.toggle_ef)
self.form_layout.addRow('', cb_eventfolder)
# self.form_layout.addRow('', cb_onedrive)
self.form_layout.addRow('', cb_tsi)
add_linesep(self.form_layout)
self.form_layout.addRow('', box_fc)
self.form_layout.addRow('', box_fc_out)
add_linesep(self.form_layout)
add(field=IPF(text='Title', dtype='textbox', enforce=True))
add(field=IPF(text='Failure Cause', dtype='textbox'))
# Warranty Status
if is_cummins:
wnty_default = 'WNTY'
list_name = 'WarrantyTypeCummins'
else:
list_name = 'WarrantyType'
wnty_default = 'Yes'
add(
field=IPF(
text='Warranty Status',
col_db='WarrantyYN',
default=wnty_default),
items=cf.config['Lists'][list_name])
add(field=IPF(text='Work Order', col_db='WorkOrder'))
add(field=IPF(text='WO Customer', col_db='SuncorWO'))
add(field=IPF(text='PO Customer', col_db='SuncorPO'))
self.add_component_fields()
self._restore_settings()
self.fUnit.box.select_all()
f.set_self(vars())
self.accepted.connect(self.close)
self.rejected.connect(self.close)
self.fUnit.box.currentIndexChanged.connect(self.update_open_fc_labels)
self.update_open_fc_labels()
self.show()
def update_open_fc_labels(self):
"""Update outstanding FC labels for M and Other when unit changed"""
if self.is_cummins:
return # fc labels not implemented for cummins
unit = self.fUnit.val
s = db.get_df_fc(default=True, unit=unit) \
.groupby(['Type']).size()
count_m = s.get('M', 0)
count_other = sum(s[s.index != 'M'].values)
self.label_fc_m.setText(f'M: {count_m}')
self.label_fc_other.setText(f'Other: {count_other}')
color = '#ff5454' if count_m > 0 else 'white'
self.label_fc_m.setStyleSheet(f"""QLabel {{color: {color}}}""")
def toggle_ef(self, state):
"""Toggle OneDrive folder when EventFolder change"""
# source = self.sender()
# box = source.box
cb = self.cb_onedrive
if Qt.CheckState(state) == Qt.CheckState.Checked:
cb.setEnabled(True)
else:
cb.setEnabled(False)
@pyqtSlot(int)
def component_changed(self, ix):
# Update Title text when Component selected in combobox
combo = self.sender()
val = combo.val
if val.strip():
self.fTitle.val = f'{val} - CO'
def create_row(self):
row = super().create_row()
row.UID = self.create_uid()
row.CreatedBy = self.mainwindow.username if self.mainwindow is not None else ''
row.StatusEvent = 'Work In Progress'
row.StatusWO = 'Open'
row.Seg = 1
row.Pictures = 0
return row
def create_button(self, name, width=80):
btn = QPushButton(name, self)
btn.setFixedSize(QSize(width, btn.sizeHint().height()))
return btn
def add_component_fields(self):
# Add fields to select component/component SMR 1 + 2
IPF = InputField
def _add_component(text):
field = self.add_input(
field=IPF(
text=text,
dtype='combobox',
col_db='Floc',
enforce=True),
checkbox=True,
cb_enabled=False)
field.cb.stateChanged.connect(self.load_components)
return field
def _add_removal(text):
field = self.add_input(
field=IPF(
text=text,
dtype='combobox',
col_db='SunCOReason',
enforce=True),
enabled=False)
return field
def _add_smr(text):
btn = self.create_button('Get SMR')
btn.clicked.connect(self.get_component_smr)
field = self.add_input(field=IPF(text=text, dtype='int', col_db='ComponentSMR'), btn=btn)
field.box.setEnabled(False)
return field
add_linesep(self.form_layout)
for suff in ('', ' 2'):
field_comp = _add_component(f'Component CO{suff}')
field_smr = _add_smr(f'Component SMR{suff}')
field_removal = _add_removal(f'Removal Reason{suff}')
field_comp.box_smr = field_smr.box # lol v messy
field_smr.box_comp = field_comp.box
field_comp.box_removal = field_removal.box
add_linesep(self.form_layout)
self.fComponentCO.box.currentIndexChanged.connect(self.component_changed)
@property
def df_comp(self):
if not hasattr(self, '_df_comp') or self._df_comp is None:
self._df_comp = db.get_df_component()
return self._df_comp
def get_floc(self, component_combined):
df = self.df_comp
return df[df.Combined == component_combined].Floc.values[0]
def load_components(self, state, *args):
"""Reload components to current unit when component co toggled
- Also toggle smr boxes"""
source = self.sender() # source is checkbox
box = source.box
box_smr = box.field.box_smr
box_removal = box.field.box_removal
if Qt.CheckState(state) == Qt.CheckState.Checked:
df = self.df_comp
unit = self.fUnit.val
equip_class = db.get_unit_val(unit=unit, field='EquipClass')
s = df[df.EquipClass == equip_class].Combined
lst = f.clean_series(s)
box.set_items(lst)
# add removal reason items
lst_removal = cf.config['Lists']['RemovalReason']
box_removal.set_items(lst_removal)
# box_removal.val = 'High Hour Changeout'
box_removal.val = ''
box_removal.setEnabled(True)
box.lineEdit().selectAll()
box_smr.setEnabled(True)
box_removal.setEnabled(True)
else:
box_smr.setEnabled(False)
box_removal.setEnabled(False)
def get_component_smr(self):
source = self.sender()
box = source.box # box is linked to btn through add_input
df = self.df_comp
unit, smr, date = self.fUnit.val, self.fSMR.val, self.fDate.val
component = box.field.box_comp.val
# spinbox returns None if val is 0
if smr is None:
smr = 0
if smr <= 0:
msg = 'Set Unit SMR first!'
msg_simple(msg=msg, icon='warning')
return
# get last CO from EL by floc
floc = self.get_floc(component_combined=component)
smr_last = smr - db.get_smr_prev_co(unit=unit, floc=floc, date=date)
if smr_last is not None:
box.val = smr_last
else:
box.val = smr
m = dict(Unit=unit, component=component)
msg = f'No previous component changeouts found for: \
\n{f.pretty_dict(m)}\n\nSetting Component SMR to current unit SMR: {smr}'
msg_simple(msg=msg, icon='warning')
def get_smr(self):
# NOTE could select all nearby dates in db and show to user
unit, date = self.fUnit.val, self.fDate.val
smr = db.get_smr(unit=unit, date=date)
if smr is not None:
self.fSMR.val = smr
else:
msg = f'No SMR found for\n\n \
Unit: {unit}\nDate: {date}\n\n \
Note - Daily SMR values are not uploaded to the database until 12:20 MST.'
msg_simple(msg=msg, icon='warning')
def create_uid(self):
return str(time.time()).replace('.', '')[:12]
def show_open_fcs(self):
"""Toggle open FCs dialog for current unit"""
if not hasattr(self, 'dlg_fc') or self.dlg_fc is None:
unit = self.fUnit.val
dlg_fc = UnitOpenFC(parent=self, unit=unit)
# move fc top left to AddEvent top right
dlg_fc.move(self.frameGeometry().topRight())
dlg_fc.show()
self.dlg_fc = dlg_fc
else:
self.dlg_fc.close()
self.dlg_fc = None
def select_fc(self, state):
"""Show dialog to select FC from list"""
if Qt.CheckState(state) != Qt.CheckState.Checked:
return
ok, fc_number, title = fc.select_fc(unit=self.fUnit.val)
if not ok:
self.cb_fc.setChecked(False)
return
self.fTitle.val = title
self.fc_number = fc_number
def accept(self):
"""AddEvent accept adds rows differently (can queue multiple)
- just bypass everything and call base qt accept"""
row, m = self.row, self.m
unit = self.fUnit.val
rows = []
if not self.check_enforce_items():
return
self.add_row_queue(row=row) # need to update at least row1
if not unit_exists(unit=unit):
return
# add these values to display in table
m['Model'] = db.get_unit_val(unit=unit, field='Model')
m['Serial'] = db.get_unit_val(unit=unit, field='Serial')
# Make sure title is good
self.fTitle.val = f.nice_title(self.fTitle.val)
if self.is_cummins:
row.IssueCategory = 'Engine'
else:
# create TSI row (row 1 only)
if self.cb_tsi.isChecked():
row.StatusTSI = 'Open'
if self.mainwindow is not None:
row.TSIAuthor = self.mainwindow.get_username()
self.set_row_attrs(
row=row,
exclude=['Component CO 2', 'Component SMR 2', 'Removal Reason 2'])
# Component CO 1
if self.fComponentCO.cb.isChecked():
row.ComponentCO = True
row.Floc = self.get_floc(component_combined=self.fComponentCO.box.val)
self.add_row_table(row=row)
# Component CO 2 > duplicate self.row
if self.fComponentCO2.cb.isChecked():
row2 = self.create_row()
self.set_row_attrs(row=row2)
component = self.fComponentCO2.box.val
row2.Floc = self.get_floc(component_combined=component)
row2.Title = f'{component} - CO'
row2.ComponentSMR = self.fComponentSMR2.box.val
row2.ComponentCO = True
row2.GroupCO = True
self.row2 = row2
self.add_row_queue(row=row2)
self.add_row_table(row=row2)
row.GroupCO = True
self.flush_queue()
self.accept_()
self.parent.view.resizeRowsToContents()
self.items = self.get_items() # needed before success message
self.success_message()
if not self.is_cummins:
if self.cb_fc.isChecked():
fc.link_fc_db(unit=unit, uid=row.UID, fc_number=self.fc_number)
if self.cb_eventfolder.isChecked():
from smseventlog import eventfolders as efl
ef = efl.EventFolder.from_model(e=row)
ef.create_folder(ask_show=True)
@classmethod
def _get_handled_types(cls):
"""Don't save any settings except unit_qcombobox"""
return tuple()
def closeEvent(self, event):
"""Reimplement just to close the FC dialog too, couldn't find a better way"""
try:
self.dlg_fc.close()
except Exception as e:
pass
self._save_settings()
return super().closeEvent(event)
class AddUnit(AddRow):
def __init__(self, parent=None):
super().__init__(parent=parent, window_title='Add Unit')
df = db.get_df_unit()
self.tablename = 'UnitID'
self.name = 'unit'
self.add_keys = ['Unit', 'Model']
self.add_input(field=InputField(text='Unit', enforce=True))
self.add_input(field=InputField(text='Serial', enforce=True))
self.add_input(field=InputField(text='Model'), items=f.clean_series(df.Model))
self.add_input(field=InputField(text='MineSite', default=self.minesite), items=cf.config['MineSite'])
self.add_input(field=InputField(text='Customer'), items=f.clean_series(df.Customer))
self.add_input(field=InputField(text='Engine Serial'))
self.add_input(field=InputField(text='Delivery Date', dtype='date'))
self.show()
def accept(self):
# when model is set, check if model_base exists. If not prompt to create one
model, unit = self.fModel.val, self.fUnit.val
modelbase = db.get_modelbase(model=model)
if modelbase is None:
dlg = CreateModelbase(model=model, parent=self)
if not dlg.exec():
return
super().accept()
class AddPart(AddRow):
def __init__(self, parent=None):
super().__init__(parent=parent, window_title='Add Part', enforce_all=True)
df = db.get_df_unit()
self.tablename = 'Parts'
self.name = 'part'
self.add_keys = ['Part Number', 'Part Name']
self.add_input(field=InputField(text='Part Number', col_db='PartNo', enforce='no_space'))
self.add_input(field=InputField(text='Part Name', col_db='PartName'))
self.add_input(field=InputField(text='Model'), items=f.clean_series(df.Model))
self.show()
class CreateModelbase(AddRow):
def __init__(self, model, parent=None):
super().__init__(parent=parent, window_title='Create ModelBase')
lst = [] # get list of equiptypes
df = db.get_df_equiptype()
lst = f.clean_series(df.EquipClass)
text = f'No ModelBase found for: "{model}". Select an EquipClass and create a ModelBase.\n\n' \
'(This is used for grouping models into a base folder structure. Eg "980E-4" > "980E")\n'
label = QLabel(text)
label.setMaximumWidth(300)
label.setWordWrap(True)
self.v_layout.insertWidget(0, label)
self.add_input(field=InputField(text='Equip Class'), items=lst)
self.add_input(field=InputField(text='Model Base'))
self.setMinimumSize(self.sizeHint())
f.set_self(vars())
def set_row_attrs(self, row, exclude=None):
row.Model = self.model
super().set_row_attrs(row=row, exclude=exclude)
def accept(self):
# check model base isn't blank
| |
#!/usr/bin/python
"""Given a regtest result tree, prints an HTML summary to a file.
See HTML skeleton in tests/regtest.html.
"""
import os
import re
import sys
import numpy as np
import matplotlib.pyplot as plt
SUMMARY_ROW = """\
<tfoot style="font-weight: bold; text-align: right">
<tr>
<td>
%(name)s
</td>
<!-- input params -->
<td></td>
<td></td>
<td></td>
<td></td>
<!-- RAPPOR params -->
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<!-- MAP params -->
<td></td>
<td></td>
<!-- Result metrics -->
<td></td>
<td></td>
<td>%(mean_fpr)s</td>
<td>%(mean_fnr)s</td>
<td>%(mean_tv)s</td>
<td>%(mean_am)s</td>
<td>%(mean_time)s</td>
</tr>
</tfoot>
"""
# Navigation and links to plot.
DETAILS = """\
<p style="text-align: right">
<a href="#top">Up</a>
</p>
<a id="%(anchor)s"></a>
<p style="text-align: center">
<img src="%(instance_dir)s/dist.png"/>
</p>
<p>
<a href="%(instance_dir)s">%(name)s files</a>
</p>
"""
# Plots comparing simulations
PLOTS = """ \
<h2>Plots</h2>
<h3 style="text-align: center">Total variation distance</h3>
<p style="text-align: center">
<img src="plots/tv.png"/>
</p>
<h3 style="text-align: center">False negative rate</h3>
<p style="text-align: center">
<img src="plots/fnr.png"/>
</p>
<h3 style="text-align: center">False positive rate</h3>
<p style="text-align: center">
<img src="plots/fpr.png"/>
</p>
<h3 style="text-align: center">Allocated mass</h3>
<p style="text-align: center">
<img src="plots/am.png"/>
</p>
<h3 style="text-align: center">Time</h3>
<p style="text-align: center">
<img src="plots/time.png"/>
</p>
"""
def FormatFloat(x, percent):
"""Formats a floating-point number."""
if percent:
return '{:.1f}%'.format(x * 100.0)
else:
return '{:.3f}'.format(x)
def FormatMeanWithSem(m_std_error, percent=False):
"""Formats an estimate with standard error."""
if m_std_error is None:
return ''
m, std_error = m_std_error
if std_error is None:
return FormatFloat(m, percent)
else:
return '{}±{}'.format(
FormatFloat(m, percent),
FormatFloat(std_error, percent))
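# e.g. (illustrative values only) FormatMeanWithSem((0.123, 0.01), percent=True)
# gives '12.3%±1.0%', FormatMeanWithSem((0.123, None)) gives '0.123', and
# FormatMeanWithSem(None) gives the empty string.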
def Mean(l):
"""Computes the mean (average) for a list of numbers."""
if l:
return float(sum(l)) / len(l)
else:
return None
def SampleVar(l):
"""Computes the sample variance for a list of numbers."""
if len(l) > 1:
mean = Mean(l)
var = sum([(x - mean) ** 2 for x in l]) / (len(l) - 1)
return var
else:
return None
def StandardErrorEstimate(l):
"""Returns the standard error estimate for a list of numbers.
For a singleton the standard error is assumed to be 10% of its value.
"""
if len(l) > 1:
return (SampleVar(l) / len(l)) ** .5
elif l:
return l[0] / 10.0
else:
return None
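# e.g. (illustrative values only) StandardErrorEstimate([2, 4, 6]) is
# sqrt(SampleVar/3) = sqrt(4/3) ~= 1.155, StandardErrorEstimate([10]) is 1.0
# (10% of the singleton), and StandardErrorEstimate([]) is None.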
def MeanOfMeans(dict_of_lists):
"""Returns the average of averages with the standard error of the estimate.
"""
means = [Mean(dict_of_lists[key]) for key in dict_of_lists
if dict_of_lists[key]]
if means:
# Compute variances of the estimate for each sublist.
se = [StandardErrorEstimate(dict_of_lists[key]) ** 2 for key
in dict_of_lists if dict_of_lists[key]]
return (Mean(means), # Mean over all sublists
sum(se) ** .5 / len(se)) # Standard deviation of the mean
else:
return None
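# e.g. (illustrative values only) MeanOfMeans({'a': [1, 3], 'b': [5]}) returns
# roughly (3.5, 0.56): the mean of the per-key means (2 and 5), and the combined
# standard error sqrt(SE_a**2 + SE_b**2) / 2.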
def ParseSpecFile(spec_filename):
"""Parses the spec (parameters) file.
Returns:
An integer and a string. The integer is the number of bogus candidates
and the string is parameters in the HTML format.
"""
with open(spec_filename) as s:
spec_row = s.readline().split()
# Second to last column is 'num_additional' -- the number of bogus
# candidates added
num_additional = int(spec_row[-2])
spec_in_html = ' '.join('<td>%s</td>' % cell for cell in spec_row[1:])
return num_additional, spec_in_html
def ExtractTime(log_filename):
"""Extracts the elapsed time information from the log file.
Returns:
Elapsed time (in seconds) or None in case of failure.
"""
if os.path.isfile(log_filename):
with open(log_filename) as log:
log_str = log.read()
# Matching a line output by analyze.R.
match = re.search(r'Inference took ([0-9.]+) seconds', log_str)
if match:
return float(match.group(1))
return None
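# e.g. a log.txt containing the line 'Inference took 12.34 seconds' yields 12.34;
# a missing file, or a log without that line, yields None.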
def ParseMetrics(metrics_file, log_file, num_additional):
"""Processes the metrics file.
Args:
metrics_file: name of the metrics file
log_file: name of the log.txt file
num_additional: A number of bogus candidates added to the candidate list.
Returns a pair:
- A dictionary of metrics (some can be []).
- An HTML-formatted portion of the report row.
"""
if not os.path.isfile(metrics_file):
metrics_row_str = ['', '', '', '', '', '']
metrics_row_dict = {}
else:
with open(metrics_file) as m:
m.readline()
metrics_row = m.readline().split(',')
(num_actual, num_rappor, num_false_pos, num_false_neg, total_variation,
allocated_mass) = metrics_row
num_actual = int(num_actual)
num_rappor = int(num_rappor)
num_false_pos = int(num_false_pos)
num_false_neg = int(num_false_neg)
total_variation = float(total_variation)
allocated_mass = float(allocated_mass)
# e.g. if there are 20 additional candidates added, and 1 false positive,
# the false positive rate is 5%.
fp_rate = float(num_false_pos) / num_additional if num_additional else 0
# e.g. if there are 100 strings in the true input, and 80 strings
# detected by RAPPOR, then we have 20 false negatives, and a false
# negative rate of 20%.
fn_rate = float(num_false_neg) / num_actual
metrics_row_str = [
str(num_actual),
str(num_rappor),
'%.1f%% (%d)' % (fp_rate * 100, num_false_pos) if num_additional
else '',
'%.1f%% (%d)' % (fn_rate * 100, num_false_neg),
'%.3f' % total_variation,
'%.3f' % allocated_mass,
]
metrics_row_dict = {
'tv': [total_variation],
'fpr': [fp_rate] if num_additional else [],
'fnr': [fn_rate],
'am': [allocated_mass],
}
elapsed_time = ExtractTime(log_file)
if elapsed_time is not None:
metrics_row_str = metrics_row_str + ['%.2f' % elapsed_time]
metrics_row_dict['time'] = [elapsed_time]
# return metrics formatted as HTML table entries
return (metrics_row_dict,
' '.join('<td>%s</td>' % cell for cell in metrics_row_str))
def FormatCell1(test_case, test_instance, metrics_file, log_file, plot_file,
link_to_plots):
"""Outputs an HTML table entry for the first cell of the row.
The row is filled if the metrics file exist. The first cell contains a link
that for short tables points to a plot file inline, for large tables to an
external file.
If the metrics file is missing, the link points to the log file (if one
exists)
"""
relpath_report = '{}/{}_report'.format(test_case, test_instance)
if os.path.isfile(metrics_file):
external_file = plot_file
if link_to_plots:
link = '#{}_{}'.format(test_case, test_instance) # anchor
else:
link = os.path.join(relpath_report, 'dist.png')
else: # no results likely due to an error, puts a link to the log file
external_file = log_file
link = os.path.join(relpath_report, 'log.txt')
if os.path.isfile(external_file):
return '<td><a href="{}">{}</a></td>'.format(link, test_case)
else: # if no file to link to
return '<td>{}</td>'.format(test_case)
def plots(metrics_lists, base_dir):
for k, v in metrics_lists.iteritems():
_makeplot(k, v, base_dir)
def _makeplot(title, values, base_dir):
plt.figure()
plt.title(title)
vals = []
x_legend = values.keys()
x_legend.sort()
for k in x_legend:
vals.append(np.mean(values[k]))
x = range(len(vals))
fig, ax = plt.subplots(1)
fig.autofmt_xdate()
plt.xticks(x, x_legend)
plt.xlabel('Simulation')
plt.ylabel('Value')
plt.grid(True)
plt.tight_layout()
plt.plot(x, vals)
if not os.path.exists('{}/plots'.format(base_dir)):
os.makedirs('{}/plots'.format(base_dir))
plt.savefig('{}/plots/{}.png'.format(base_dir, title))
def FormatSummaryRow(metrics_lists):
"""Outputs an HTML-formatted summary row."""
means_with_sem = {} # SEM - standard error of the mean
for key in metrics_lists:
means_with_sem[key] = MeanOfMeans(metrics_lists[key])
# If none of the lists is longer than one element, drop the SEM component.
if means_with_sem[key] and max([len(l) for l in metrics_lists[key].values()]) < 2:
means_with_sem[key] = [means_with_sem[key][0], None]
summary = {
'name': 'Means',
'mean_fpr': FormatMeanWithSem(means_with_sem['fpr'], percent=True),
'mean_fnr': FormatMeanWithSem(means_with_sem['fnr'], percent=True),
'mean_tv': FormatMeanWithSem(means_with_sem['tv'], percent=True),
'mean_am': FormatMeanWithSem(means_with_sem['am'], percent=True),
'mean_time': FormatMeanWithSem(means_with_sem['time']),
}
return SUMMARY_ROW % summary
def FormatPlots(base_dir, test_instances):
"""Outputs HTML-formatted plots."""
result = ''
for instance in test_instances:
# A test instance is identified by the test name and the test run.
test_case, test_instance, _ = instance.split(' ')
instance_dir = test_case + '/' + test_instance + '_report'
if os.path.isfile(os.path.join(base_dir, instance_dir, 'dist.png')):
result += DETAILS % {'anchor': test_case + '_' + test_instance,
'name': '{} (instance {})'.format(test_case,
test_instance),
'instance_dir': instance_dir}
return result
def main(argv):
base_dir = argv[1]
output_file = open(argv[2], 'w')
# This file has the test case names, in the order that they should be
# displayed.
instances_file = os.path.join(base_dir, 'test-instances.txt')
if not os.path.isfile(instances_file):
raise RuntimeError('{} is missing'.format(instances_file))
with open(instances_file) as f:
test_instances = [line.strip() for line in f]
# Metrics are assembled into a dictionary of dictionaries. The top-level
# key is the metric name ('tv', 'fpr', etc.), the second level key is
# the test case. These keys reference a list of floats, which can be empty.
metrics = {
'tv': {}, # total_variation for all test cases
'fpr': {}, # dictionary of false positive rates
'fnr': {}, # dictionary of false negative rates
'am': {}, # dictionary of total allocated masses
'time': {}, # dictionary of total elapsed time measurements
}
# If there are too many tests, the plots are not included in the results
# file. Instead, rows' names are links to the corresponding .png files.
include_plots = len(test_instances) < 20
instances_succeeded = 0
instances_failed = 0
instances_running = 0
for instance in test_instances:
# A test instance is identified by the test name and the test run.
test_case, test_instance, _ = instance.split(' ')
spec_file = os.path.join(base_dir, test_case, 'spec.txt')
if not os.path.isfile(spec_file):
raise RuntimeError('{} is missing'.format(spec_file))
num_additional, spec_html = ParseSpecFile(spec_file)
metrics_html = '' # will be filled in later on, if metrics exist
report_dir = os.path.join(base_dir, test_case, test_instance + '_report')
metrics_file = os.path.join(report_dir, 'metrics.csv')
log_file = os.path.join(report_dir, 'log.txt')
plot_file = os.path.join(report_dir, 'dist.png')
cell1_html = FormatCell1(test_case, test_instance, metrics_file, | |
1 400 401
1 400 2 1 399 401
1 401 0 0
1 402 2 1 403 404
1 403 2 1 402 404
1 404 0 0
1 405 2 1 406 407
1 406 2 1 405 407
1 407 0 0
1 408 2 1 409 410
1 409 2 1 408 410
1 410 0 0
1 411 2 1 412 413
1 412 2 1 411 413
1 413 0 0
1 414 2 1 415 416
1 415 2 1 414 416
1 416 0 0
1 417 2 1 418 419
1 418 2 1 417 419
1 419 0 0
1 420 2 1 421 422
1 421 2 1 420 422
1 422 0 0
1 423 2 1 424 425
1 424 2 1 423 425
1 425 0 0
1 426 2 1 427 428
1 427 2 1 426 428
1 428 0 0
1 429 2 1 430 431
1 430 2 1 429 431
1 431 0 0
1 432 2 1 433 434
1 433 2 1 432 434
1 434 0 0
1 435 2 1 436 437
1 436 2 1 435 437
1 437 0 0
1 438 2 1 439 440
1 439 2 1 438 440
1 440 0 0
1 441 2 1 442 443
1 442 2 1 441 443
1 443 0 0
1 444 2 1 445 446
1 445 2 1 444 446
1 446 0 0
1 447 2 1 448 449
1 448 2 1 447 449
1 449 0 0
1 450 2 1 451 452
1 451 2 1 450 452
1 452 0 0
1 453 2 1 454 455
1 454 2 1 453 455
1 455 0 0
1 456 2 1 457 458
1 457 2 1 456 458
1 458 0 0
1 459 2 1 460 461
1 460 2 1 459 461
1 461 0 0
1 462 2 1 463 464
1 463 2 1 462 464
1 464 0 0
1 465 2 1 466 467
1 466 2 1 465 467
1 467 0 0
1 468 2 1 469 470
1 469 2 1 468 470
1 470 0 0
1 471 2 1 472 473
1 472 2 1 471 473
1 473 0 0
1 474 2 1 475 476
1 475 2 1 474 476
1 476 0 0
1 477 2 1 478 479
1 478 2 1 477 479
1 479 0 0
1 480 2 1 481 482
1 481 2 1 480 482
1 482 0 0
1 483 2 1 484 485
1 484 2 1 483 485
1 485 0 0
1 486 2 1 487 488
1 487 2 1 486 488
1 488 0 0
1 489 2 1 490 491
1 490 2 1 489 491
1 491 0 0
1 492 2 1 493 494
1 493 2 1 492 494
1 494 0 0
1 495 2 1 496 497
1 496 2 1 495 497
1 497 0 0
1 498 2 1 499 500
1 499 2 1 498 500
1 500 0 0
1 501 2 1 502 503
1 502 2 1 501 503
1 503 0 0
1 504 2 1 505 506
1 505 2 1 504 506
1 506 0 0
1 507 2 1 508 509
1 508 2 1 507 509
1 509 0 0
1 510 2 1 511 512
1 511 2 1 510 512
1 512 0 0
1 513 2 1 514 515
1 514 2 1 513 515
1 515 0 0
1 516 2 1 517 518
1 517 2 1 516 518
1 518 0 0
1 519 2 1 520 521
1 520 2 1 519 521
1 521 0 0
1 522 2 1 523 524
1 523 2 1 522 524
1 524 0 0
1 525 2 1 526 527
1 526 2 1 525 527
1 527 0 0
1 528 2 1 529 530
1 529 2 1 528 530
1 530 0 0
1 531 2 1 532 533
1 532 2 1 531 533
1 533 0 0
1 534 2 1 535 536
1 535 2 1 534 536
1 536 0 0
1 537 2 1 538 539
1 538 2 1 537 539
1 539 0 0
1 540 2 1 541 542
1 541 2 1 540 542
1 542 0 0
1 543 2 1 544 545
1 544 2 1 543 545
1 545 0 0
1 546 2 1 547 548
1 547 2 1 546 548
1 548 0 0
1 549 2 1 550 551
1 550 2 1 549 551
1 551 0 0
1 552 2 1 553 554
1 553 2 1 552 554
1 554 0 0
1 555 2 1 556 557
1 556 2 1 555 557
1 557 0 0
1 558 2 1 559 560
1 559 2 1 558 560
1 560 0 0
1 561 2 1 562 563
1 562 2 1 561 563
1 563 0 0
1 564 2 1 565 566
1 565 2 1 564 566
1 566 0 0
1 567 2 1 568 569
1 568 2 1 567 569
1 569 0 0
1 570 2 1 571 572
1 571 2 1 570 572
1 572 0 0
1 573 2 1 574 575
1 574 2 1 573 575
1 575 0 0
1 576 2 1 577 578
1 577 2 1 576 578
1 578 0 0
1 579 2 1 580 581
1 580 2 1 579 581
1 581 0 0
1 582 2 1 583 584
1 583 2 1 582 584
1 584 0 0
1 585 2 1 586 587
1 586 2 1 585 587
1 587 0 0
1 588 2 1 589 590
1 589 2 1 588 590
1 590 0 0
1 591 2 1 592 593
1 592 2 1 591 593
1 593 0 0
1 594 2 1 595 596
1 595 2 1 594 596
1 596 0 0
1 597 2 1 598 599
1 598 2 1 597 599
1 599 0 0
1 600 2 1 601 602
1 601 2 1 600 602
1 602 0 0
1 603 2 1 604 605
1 604 2 1 603 605
1 605 0 0
1 606 2 1 607 608
1 607 2 1 606 608
1 608 0 0
1 609 2 1 610 611
1 610 2 1 609 611
1 611 0 0
1 612 2 1 613 614
1 613 2 1 612 614
1 614 0 0
1 615 2 1 616 617
1 616 2 1 615 617
1 617 0 0
1 618 2 1 619 620
1 619 2 1 618 620
1 620 0 0
1 621 2 1 622 623
1 622 2 1 621 623
1 623 0 0
1 624 2 1 625 626
1 625 2 1 624 626
1 626 0 0
1 627 2 1 628 629
1 628 2 1 627 629
1 629 0 0
1 630 2 1 631 632
1 631 2 1 630 632
1 632 0 0
1 633 2 1 634 635
1 634 2 1 633 635
1 635 0 0
1 636 2 1 637 638
1 637 2 1 636 638
1 638 0 0
1 639 2 1 640 641
1 640 2 1 639 641
1 641 0 0
1 642 2 1 643 644
1 643 2 1 642 644
1 644 0 0
1 645 2 1 646 647
1 646 2 1 645 647
1 647 0 0
1 648 2 1 649 650
1 649 2 1 648 650
1 650 0 0
1 651 2 1 652 653
1 652 2 1 651 653
1 653 0 0
1 654 2 1 655 656
1 655 2 1 654 656
1 656 0 0
1 657 2 1 658 659
1 658 2 1 657 659
1 659 0 0
1 660 2 1 661 662
1 661 2 1 660 662
1 662 0 0
1 663 2 1 664 665
1 664 2 1 663 665
1 665 0 0
1 666 2 1 667 668
1 667 2 1 666 668
1 668 0 0
1 669 2 1 670 671
1 670 2 1 669 671
1 671 0 0
1 672 2 1 673 674
1 673 2 1 672 674
1 674 0 0
1 675 2 1 676 677
1 676 2 1 675 677
1 677 0 0
1 678 2 1 679 680
1 679 2 1 678 680
1 680 0 0
1 681 2 1 682 683
1 682 2 1 681 683
1 683 0 0
1 684 2 1 685 686
1 685 2 1 684 686
1 686 0 0
1 687 2 1 688 689
1 688 2 1 687 689
1 689 0 0
1 690 2 1 691 692
1 691 2 1 690 692
1 692 0 0
1 693 2 1 694 695
1 694 2 1 | |
bin)
# exact matches match the first bin
# suffix matches match an arbitrary bin (which is ok, because
# the suffix lists should be disjoint)
if self.needs_domain_exact_match and domain_exact_match_bin is not None:
domain_suffix_match_bin = domain_exact_match_bin
else:
# check for a suffix match
# this is O(N), but obviously correct
# assert suffix_match(domain_suffix_obj, remote_host) == any([remote_host.endswith("." + domain) for domain in domain_exact_obj])
# this is O(D), where D is the number of domain components
# The TS guarantees that the lists are disjoint
domain_suffix_match_bin = suffix_match(self.domain_suffix_obj,
remote_host,
separator=".")
if domain_suffix_match_bin == 0:
suffix_match_str = "DomainSuffixMatch"
else:
suffix_match_str = "DomainNoSuffixMatch"
# collect suffix match / no match counts for the first list
self._increment_stream_end_histograms(suffix_match_str + stream_web + stream_circ,
totalbw, writebw, readbw,
ratio, lifetime)
# collect suffix match counts per list
self._increment_stream_end_count_lists("DomainSuffixMatch" + stream_web + stream_circ,
domain_suffix_match_bin,
totalbw, writebw,
readbw)
#logging.debug("class: {} web: {} IP: {} Host: {} {} Stream: {} Exact: {} Suffix: {}"
# .format(stream_class, stream_web,
# ip_version,
# host_ip_version, remote_host,
# stream_circ,
# domain_exact_match_bin,
# domain_suffix_match_bin))
return True
@staticmethod
def _is_port_web(port):
'''
Return True if port is Web.
'''
return port == 80 or port == 443
INTERACTIVE_PORTS = set([22, 194, 994, 6660, 6661, 6662, 6663, 6664, 6665, 6666, 6667, 6668, 6669, 6670, 6679, 6697, 7000])
P2P_PORT_CACHE = None
@staticmethod
def _p2p_port_set():
'''
Return a set containing the P2P ports.
'''
if Aggregator.P2P_PORT_CACHE is not None:
return Aggregator.P2P_PORT_CACHE
p2p_ports = [1214]
for p in xrange(4661, 4666+1): p2p_ports.append(p)
for p in xrange(6346, 6429+1): p2p_ports.append(p)
p2p_ports.append(6699)
for p in xrange(6881, 6999+1): p2p_ports.append(p)
p2p_ports.append(45682) # utorrent
p2p_ports.append(51413) # transmission
Aggregator.P2P_PORT_CACHE = set(p2p_ports)
return Aggregator.P2P_PORT_CACHE
@staticmethod
def _classify_port(port):
'''
Classify port into Web, Interactive, P2P, or OtherPort.
'''
if Aggregator._is_port_web(port):
return 'Web'
elif port in Aggregator.INTERACTIVE_PORTS:
return 'Interactive'
elif port in Aggregator._p2p_port_set():
return 'P2P'
else:
return 'OtherPort'
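# e.g. _classify_port(443) is 'Web', _classify_port(6667) is 'Interactive',
# _classify_port(6881) is 'P2P', and _classify_port(8080) is 'OtherPort'.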
@staticmethod
def _classify_port_web(port):
'''
Classify port into Web or NonWeb.
'''
return 'Web' if Aggregator._is_port_web(port) else 'NonWeb'
@staticmethod
def _encode_ratio(inval, outval):
'''
Calculate the log ratio between inbound and outbound traffic.
Positive when outval > inval, and negative when inval > outval.
Returns a non-infinite floating point value:
- zero when inval and outval are zero,
- a large negative number (< -100) when outval is zero, and
- a large positive number (> 100) when inval is zero, and
- log(base 2)(outval/inval) otherwise.
'''
inval = float(inval)
outval = float(outval)
if inval == 0.0 and outval == 0.0:
return 0.0
elif inval == 0.0:
return sys.float_info.max_exp
elif outval == 0.0:
return sys.float_info.min_exp
else:
return math.log(outval/inval, 2)
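# e.g. _encode_ratio(2, 8) is log2(8/2) = 2.0 and _encode_ratio(8, 2) is -2.0;
# _encode_ratio(0, 0) is 0.0, while _encode_ratio(0, x) and _encode_ratio(x, 0)
# return sys.float_info.max_exp (1024 on IEEE-754 doubles) and
# sys.float_info.min_exp (-1021), respectively.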
@staticmethod
def _compute_interstream_creation_times(start_times):
'''
Sort start_times, and return a list of the differences between each
pair of times.
'''
start_times.sort()
isc_times = []
for i in xrange(len(start_times)):
if i == 0: continue
isc_times.append(start_times[i] - start_times[i-1])
return isc_times
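# e.g. _compute_interstream_creation_times([5.0, 1.0, 3.0]) sorts the start
# times to [1.0, 3.0, 5.0] and returns the gaps [2.0, 2.0]; note that the
# input list is sorted in place.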
@staticmethod
def _is_circuit_active(ncellsin, ncellsout):
'''
Return true if a circuit with ncellsin inbound cells and ncellsout
outbound cells is considered active.
'''
return ncellsin + ncellsout >= 8
# The legacy event is still processed by the injector, but is ignored
# by PrivCount 1.1.0 and later
CIRCUIT_ENDED_ITEMS = 12
def _handle_legacy_exit_circuit_event(self, fields, event_desc):
'''
Increment circuit-level counters and store circuit data for later
processing. This is the legacy code that used to process the
PRIVCOUNT_CIRCUIT_ENDED event, and now processes Exit events sent to
it via _handle_circuit_close_event.
'''
event_desc = event_desc + " processing legacy circuit end event counters"
# Extract the field values we want
chanid = get_int_value("PreviousChannelId",
fields, event_desc,
is_mandatory=False)
circid = get_int_value("PreviousCircuitId",
fields, event_desc,
is_mandatory=False)
# Get cell counts
(ncellsin, ncellsout) = Aggregator._get_cell_counts(fields, event_desc)
readbwexit = get_int_value("InboundExitByteCount",
fields, event_desc,
is_mandatory=False)
writebwexit = get_int_value("OutboundExitByteCount",
fields, event_desc,
is_mandatory=False)
start = get_float_value("CreatedTimestamp",
fields, event_desc,
is_mandatory=False)
end = get_float_value("EventTimestamp",
fields, event_desc,
is_mandatory=False)
previp = get_ip_address_value("PreviousNodeIPAddress",
fields, event_desc,
is_mandatory=False,
default="0.0.0.0")
prevIsClient = get_flag_value("IsEntryFlag",
fields, event_desc,
is_mandatory=False,
default=False)
nextip = get_ip_address_value("NextNodeIPAddress",
fields, event_desc,
is_mandatory=False,
default="0.0.0.0")
nextIsEdge = get_flag_value("IsExitFlag",
fields, event_desc,
is_mandatory=False,
default=False)
# check they are all present
if (chanid is None or circid is None or ncellsin is None or
ncellsout is None or readbwexit is None or writebwexit is None or
start is None or end is None or previp is None or
prevIsClient is None or nextip is None or nextIsEdge is None):
logging.warning("Unexpected missing field {}".format(event_desc))
return False
# Now process using the legacy code
# we get circuit events on both exits and entries
# stream bw info is only avail on exits
if prevIsClient:
# prev hop is a client, we are entry
# is this circuit active, based on its cell counts?
# non-exit circuits only see cells
is_active = Aggregator._is_circuit_active(ncellsin, ncellsout)
# count unique client ips
# we saw this client within current rotation window
self.cli_ips_current.setdefault(previp, {'is_active':False})
if is_active:
self.cli_ips_current[previp]['is_active'] = True
if start < self.cli_ips_rotated:
# we also saw the client in the previous rotation window
self.cli_ips_previous.setdefault(previp, {'is_active':False})
if is_active:
self.cli_ips_previous[previp]['is_active'] = True
# count number of completed circuits per client
self.cli_ips_current[previp].setdefault('num_active_completed',
0)
self.cli_ips_current[previp].setdefault('num_inactive_completed',
0)
if is_active:
self.cli_ips_current[previp]['num_active_completed'] += 1
else:
self.cli_ips_current[previp]['num_inactive_completed'] += 1
elif nextIsEdge:
# prev hop is a relay and next is an edge connection, we are exit
# don't count single-hop exits
# check if we have any stream info in this circuit
circ_is_known = self.is_circ_known(chanid=chanid, circid=circid)
has_completed_stream = False
if circ_is_known:
if sum(self.circ_info[chanid][circid]['num_streams'].values()) > 0:
has_completed_stream = True
if circ_is_known and has_completed_stream:
# we have circuit info and at least one stream ended on it
# all Exit circuits should pass this test
# convenience
counts = self.circ_info[chanid][circid]['num_streams']
times = self.circ_info[chanid][circid]['stream_starttimes']
# first increment general counters
self.secure_counters.increment('ExitCircuitStreamHistogram',
bin=sum(counts.values()),
inc=1)
for isct in Aggregator._compute_interstream_creation_times(times['Web'] + times['Interactive'] + times['P2P'] + times['OtherPort']):
self.secure_counters.increment('ExitCircuitInterStreamCreationTime',
bin=isct,
inc=1)
# now only increment the classes that have positive counts
if counts['Web'] > 0:
self.secure_counters.increment('ExitWebCircuitCount',
bin=SINGLE_BIN,
inc=1)
self.secure_counters.increment('ExitCircuitWebStreamHistogram',
bin=counts['Web'],
inc=1)
for isct in Aggregator._compute_interstream_creation_times(times['Web']):
self.secure_counters.increment('ExitCircuitWebInterStreamCreationTime',
bin=isct,
inc=1)
if counts['Interactive'] > 0:
self.secure_counters.increment('ExitInteractiveCircuitCount',
bin=SINGLE_BIN,
inc=1)
self.secure_counters.increment('ExitCircuitInteractiveStreamHistogram',
bin=counts['Interactive'],
inc=1)
for isct in Aggregator._compute_interstream_creation_times(times['Interactive']):
self.secure_counters.increment('ExitCircuitInteractiveInterStreamCreationTime',
bin=isct,
inc=1)
if counts['P2P'] > 0:
self.secure_counters.increment('ExitP2PCircuitCount',
bin=SINGLE_BIN,
inc=1)
self.secure_counters.increment('ExitCircuitP2PStreamHistogram',
bin=counts['P2P'],
inc=1)
for isct in Aggregator._compute_interstream_creation_times(times['P2P']):
self.secure_counters.increment('ExitCircuitP2PInterStreamCreationTime',
bin=isct,
inc=1)
if counts['OtherPort'] > 0:
self.secure_counters.increment('ExitOtherPortCircuitCount',
bin=SINGLE_BIN,
inc=1)
self.secure_counters.increment('ExitCircuitOtherPortStreamHistogram',
bin=counts['OtherPort'],
inc=1)
for isct in Aggregator._compute_interstream_creation_times(times['OtherPort']):
self.secure_counters.increment('ExitCircuitOtherPortInterStreamCreationTime',
bin=isct,
inc=1)
# cleanup
# TODO: secure delete
if circ_is_known:
# remove circ from channel
self.circ_info[chanid].pop(circid, None)
# if that was the last circuit on channel, remove the channel too
if len(self.circ_info[chanid]) == 0:
self.circ_info.pop(chanid, None)
return True
# The legacy event is still processed by the injector, but is ignored
# by PrivCount 1.2.0 and later
CONNECTION_ENDED_ITEMS = 5
@staticmethod
def is_hs_version_valid(fields, event_desc,
is_mandatory=False,
prefix=None):
'''
Check that the HiddenServiceVersionNumber in fields exists, and is
2 or 3. Uses prefix before HiddenServiceVersionNumber if it is set.
See is_int_valid for details.
'''
if prefix is None:
prefix = ""
return is_int_valid("{}HiddenServiceVersionNumber".format(prefix),
fields, event_desc,
is_mandatory=is_mandatory,
min_value=2, max_value=3)
@staticmethod
def get_hs_version(fields, event_desc,
is_mandatory=False,
default=None):
'''
Check that fields["HiddenServiceVersionNumber"] exists and is valid.
If it is, return it as an integer.
Otherwise, if is_mandatory is True, assert.
Otherwise, return default.
See is_hs_version_valid for details.
'''
# This should have been checked earlier
assert Aggregator.is_hs_version_valid(fields, event_desc,
is_mandatory=is_mandatory)
return get_int_value("HiddenServiceVersionNumber",
fields, event_desc,
is_mandatory=is_mandatory,
default=default)
@staticmethod
def warn_unexpected_field_value(field_name, fields, event_desc):
'''
Called when we expect field_name to be a particular value, and it is
not. Log a warning containing field_name, fields[field_name]
(if available), and event_desc.
'''
if field_name in fields:
field_value = fields[field_name]
value_message = "{} value '{}'".format(field_name,
summarise_string(field_value))
full_value_message = "{} value (full value) '{}'".format(
field_name,
field_value)
else:
value_message = "missing {} value".format(field_name)
full_value_message = value_message
logging.warning("Unexpected {} {}. Maybe we should add a counter for it?"
.format(value_message, event_desc))
logging.debug("Unexpected {} {}. Maybe we should add a counter for it?"
.format(full_value_message, event_desc))
@staticmethod
def warn_unknown_counter(counter_name, origin_desc, event_desc):
'''
If counter_name is an unknown counter name, log a warning containing
origin_desc and event_desc.
'''
if counter_name not in get_valid_counters():
logging.warning("Ignored unknown counter {} from {} {}. Is your PrivCount Tor version newer than your PrivCount version?"
.format(counter_name, origin_desc, event_desc))
@staticmethod
| |
= None, on = None, left_on = None, right_on = None, batch_func = None, columns = None, suffix="_right"):
# how many things you might checkpoint, the number of keys in the dict
self.num_states = 2
self.trade = {}
self.quote = {}
self.ckpt_start0 = 0
self.ckpt_start1 = 0
self.columns = columns
self.suffix = suffix
if on is not None:
assert left_on is None and right_on is None
self.left_on = on
self.right_on = on
else:
assert left_on is not None and right_on is not None
self.left_on = left_on
self.right_on = right_on
if group_on is not None:
assert group_left_on is None and group_right_on is None
self.group_left_on = group_on
self.group_right_on = group_on
else:
assert group_left_on is not None and group_right_on is not None
self.group_left_on = group_left_on
self.group_right_on = group_right_on
self.batch_func = batch_func
# keys that will never be seen again, safe to delete from the state on the other side
def serialize(self):
result = {0:self.trade, 1:self.quote}
return result, "all"
def deserialize(self, s):
assert type(s) == list
self.trade = s[0][0]
self.quote = s[0][1]
def find_second_smallest(self, batch, key):
smallest = batch[0][key]
for i in range(len(batch)):
if batch[i][key] > smallest:
return batch[i][key]
# the execute function signature does not change. stream_id will be a [0 - (length of InputStreams list - 1)] integer
def execute(self,batches, stream_id, executor_id):
# state compaction
batches = [i for i in batches if len(i) > 0]
if len(batches) == 0:
return
# self.trade will be a dictionary of lists.
# self.quote will be a dictionary of lists.
# trade
ret_vals = []
if stream_id == 0:
for batch in batches:
frames = batch.partition_by(self.group_left_on)
for trade_chunk in frames:
symbol = trade_chunk["symbol"][0]
min_trade_ts = trade_chunk[self.left_on][0]
max_trade_ts = trade_chunk[self.left_on][-1]
if symbol not in self.quote:
if symbol in self.trade:
self.trade[symbol].append(trade_chunk)
else:
self.trade[symbol] = [trade_chunk]
continue
current_quotes_for_symbol = self.quote[symbol]
for i in range(len(current_quotes_for_symbol)):
quote_chunk = current_quotes_for_symbol[i]
min_quote_ts = quote_chunk[self.right_on][0]
max_quote_ts = quote_chunk[self.right_on][-1]
#print(max_trade_ts, min_quote_ts, min_trade_ts, max_quote_ts)
if max_trade_ts < min_quote_ts or min_trade_ts > max_quote_ts:
# no overlap.
continue
else:
second_smallest_quote_ts = self.find_second_smallest(quote_chunk, self.right_on)
joinable_trades = trade_chunk[(trade_chunk[self.left_on] >= second_smallest_quote_ts) & (trade_chunk[self.left_on] < max_quote_ts)]
if len(joinable_trades) == 0:
continue
trade_start_ts = joinable_trades[self.left_on][0]
trade_end_ts = joinable_trades[self.left_on][-1]
if len(joinable_trades) == 0:
continue
quote_start_ts = quote_chunk[self.right_on][quote_chunk[self.right_on] <= trade_start_ts][-1]
quote_end_ts = quote_chunk[self.right_on][quote_chunk[self.right_on] <= trade_end_ts][-1]
joinable_quotes = quote_chunk[(quote_chunk[self.right_on] >= quote_start_ts) & (quote_chunk[self.right_on] <= quote_end_ts)]
if len(joinable_quotes) == 0:
continue
trade_chunk = trade_chunk[(trade_chunk[self.left_on] < trade_start_ts) | (trade_chunk[self.left_on] > trade_end_ts)]
new_chunk = quote_chunk[(quote_chunk[self.right_on] < quote_start_ts) | (quote_chunk[self.right_on] > quote_end_ts)]
self.quote[symbol][i] = new_chunk
ret_vals.append(joinable_trades.join_asof(joinable_quotes.drop(self.group_right_on), left_on = self.left_on, right_on = self.right_on))
if len(trade_chunk) == 0:
break
self.quote[symbol] = [i for i in self.quote[symbol] if len(i) > 0]
if len(trade_chunk) == 0:
continue
if symbol in self.trade:
self.trade[symbol].append(trade_chunk)
else:
self.trade[symbol] = [trade_chunk]
#quote
elif stream_id == 1:
for batch in batches:
frames = batch.partition_by(self.group_right_on)
for quote_chunk in frames:
symbol = quote_chunk["symbol"][0]
min_quote_ts = quote_chunk[self.right_on][0]
max_quote_ts = quote_chunk[self.right_on][-1]
if symbol not in self.trade:
if symbol in self.quote:
self.quote[symbol].append(quote_chunk)
else:
self.quote[symbol] = [quote_chunk]
continue
current_trades_for_symbol = self.trade[symbol]
for i in range(len(current_trades_for_symbol)):
trade_chunk = current_trades_for_symbol[i]
#print(current_trades_for_symbol)
min_trade_ts = trade_chunk[self.left_on][0]
max_trade_ts = trade_chunk[self.left_on][-1]
if max_trade_ts < min_quote_ts or min_trade_ts > max_quote_ts:
# no overlap.
continue
else:
second_smallest_quote_ts = self.find_second_smallest(quote_chunk, self.right_on)
joinable_trades = trade_chunk[(trade_chunk[self.left_on] >= second_smallest_quote_ts) &( trade_chunk[self.left_on] < max_quote_ts)]
if len(joinable_trades) == 0:
continue
trade_start_ts = joinable_trades[self.left_on][0]
trade_end_ts = joinable_trades[self.left_on][-1]
if len(joinable_trades) == 0:
continue
quote_start_ts = quote_chunk[self.right_on][quote_chunk[self.right_on] <= trade_start_ts][-1]
quote_end_ts = quote_chunk[self.right_on][quote_chunk[self.right_on] <= trade_end_ts][-1]
joinable_quotes = quote_chunk[(quote_chunk[self.right_on] >= quote_start_ts) & (quote_chunk[self.right_on] <= quote_end_ts)]
if len(joinable_quotes) == 0:
continue
quote_chunk = quote_chunk[(quote_chunk[self.right_on] < quote_start_ts) | (quote_chunk[self.right_on] > quote_end_ts)]
new_chunk = trade_chunk[(trade_chunk[self.left_on] < trade_start_ts) | (trade_chunk[self.left_on] > trade_end_ts)]
self.trade[symbol][i] = new_chunk
ret_vals.append(joinable_trades.join_asof(joinable_quotes.drop(self.group_right_on), left_on = self.left_on, right_on = self.right_on))
if len(quote_chunk) == 0:
break
self.trade[symbol] = [i for i in self.trade[symbol] if len(i) > 0]
if len(quote_chunk) == 0:
continue
if symbol in self.quote:
self.quote[symbol].append(quote_chunk)
else:
self.quote[symbol] = [quote_chunk]
#print(ret_vals)
if len(ret_vals) == 0:
return
for thing in ret_vals:
print(len(thing))
print(thing[thing.symbol=="ZU"])
result = polars.concat(ret_vals).drop_nulls()
if self.columns is not None and result is not None and len(result) > 0:
result = result[self.columns]
if result is not None and len(result) > 0:
if self.batch_func is not None:
da = self.batch_func(result.to_pandas())
return da
else:
print("RESULT LENGTH",len(result))
return result
def done(self,executor_id):
#print(len(self.state0),len(self.state1))
ret_vals = []
for symbol in self.trade:
if symbol not in self.quote:
continue
else:
trades = polars.concat(self.trade[symbol]).sort(self.left_on)
quotes = polars.concat(self.quote[symbol]).sort(self.right_on)
ret_vals.append(trades.join_asof(quotes.drop(self.group_right_on), left_on = self.left_on, right_on = self.right_on, suffix=self.suffix))
print("done asof join ", executor_id)
return polars.concat(ret_vals).drop_nulls()
class PolarJoinExecutor(Executor):
# batch func here expects a list of dfs. This is a quirk of the fact that join results could be a list of dfs.
# batch func must return a list of dfs too
def __init__(self, on = None, left_on = None, right_on = None, batch_func = None, columns = None, suffix="_right"):
# how many things you might checkpoint, the number of keys in the dict
self.num_states = 2
self.state0 = None
self.state1 = None
self.ckpt_start0 = 0
self.ckpt_start1 = 0
self.columns = columns
self.suffix = suffix
if on is not None:
assert left_on is None and right_on is None
self.left_on = on
self.right_on = on
else:
assert left_on is not None and right_on is not None
self.left_on = left_on
self.right_on = right_on
self.batch_func = batch_func
# keys that will never be seen again, safe to delete from the state on the other side
def serialize(self):
result = {0:self.state0[self.ckpt_start0:] if (self.state0 is not None and len(self.state0[self.ckpt_start0:]) > 0) else None, 1:self.state1[self.ckpt_start1:] if (self.state1 is not None and len(self.state1[self.ckpt_start1:]) > 0) else None}
if self.state0 is not None:
self.ckpt_start0 = len(self.state0)
if self.state1 is not None:
self.ckpt_start1 = len(self.state1)
return result, "inc"
def deserialize(self, s):
assert type(s) == list
list0 = [i[0] for i in s if i[0] is not None]
list1 = [i[1] for i in s if i[1] is not None]
self.state0 = polars.concat(list0) if len(list0) > 0 else None
self.state1 = polars.concat(list1) if len(list1) > 0 else None
self.ckpt_start0 = len(self.state0) if self.state0 is not None else 0
self.ckpt_start1 = len(self.state1) if self.state1 is not None else 0
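# Sketch of the incremental checkpoint protocol implemented above (illustrative):
# serialize() returns only the rows appended since the last checkpoint, tagged
# "inc", e.g. ({0: <new left-side rows or None>, 1: <new right-side rows or None>}, "inc"),
# and deserialize() rebuilds the full build state by concatenating that list of increments.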
# the execute function signature does not change. stream_id will be a [0 - (length of InputStreams list - 1)] integer
def execute(self,batches, stream_id, executor_id):
# state compaction
batches = [i for i in batches if i is not None and len(i) > 0]
if len(batches) == 0:
return
batch = polars.concat(batches)
result = None
if stream_id == 0:
if self.state1 is not None:
try:
result = batch.join(self.state1,left_on = self.left_on, right_on = self.right_on ,how='inner', suffix=self.suffix)
except:
print(batch)
if self.state0 is None:
self.state0 = batch
else:
self.state0.vstack(batch, in_place = True)
elif stream_id == 1:
if self.state0 is not None:
result = self.state0.join(batch,left_on = self.left_on, right_on = self.right_on ,how='inner', suffix=self.suffix)
if self.state1 is None:
self.state1 = batch
else:
self.state1.vstack(batch, in_place = True)
if self.columns is not None and result is not None and len(result) > 0:
result = result[self.columns]
if result is not None and len(result) > 0:
if self.batch_func is not None:
da = self.batch_func(result.to_pandas())
return da
else:
#print("RESULT LENGTH",len(result))
return result
def done(self,executor_id):
#print(len(self.state0),len(self.state1))
print("done join ", executor_id)
class OOCJoinExecutor(Executor):
# batch func here expects a list of dfs. This is a quirk of the fact that join results could be a list of dfs.
# batch func must return a list of dfs too
def __init__(self, on = None, left_on = None, right_on = None, left_primary = False, right_primary = False, batch_func = None):
self.state0 = PersistentStateVariable()
self.state1 = PersistentStateVariable()
if on is not None:
assert left_on is None and right_on is None
self.left_on = on
self.right_on = on
else:
assert left_on is not None and right_on is not None
self.left_on = left_on
self.right_on = right_on
self.batch_func = batch_func
# the execute function signature does not change. stream_id will be a [0 - (length of InputStreams list | |
from numpy import arctan, zeros, pi, real as re, imag as im, linspace,eye, prod, newaxis
from numpy import array as arr, exp, log, arange, diag, kron, savetxt, cumsum, argmax
from numpy.linalg import det, norm, solve
from scipy.optimize import fsolve
import matplotlib.pyplot as plt
from copy import deepcopy
from time import clock
from numpy.random import uniform, shuffle
from scipy import integrate
import matplotlib.patches as pat
import pylab as pl
def ncr(n,r):
if r < 0:
return 0
p,q = 1,1
for j in range(r):
p *= n-j
q *= j+1
return p//q
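# e.g. ncr(5, 2) == 10, ncr(6, 0) == 1, and ncr(n, r) == 0 for r < 0 or r > n.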
def ind_draw(o,m,n): #generator giving all ways to draw n elements from range(o,m) without replacement
if n>=0:
l = m-o
if n == 0:
yield []
elif n == l:
yield list(range(o,m))
else:
for k in range(o,m-n+1):
for wha in ind_draw(k+1,m,n-1):
yield [k]+wha
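# e.g. list(ind_draw(0, 4, 2)) == [[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3]],
# i.e. every increasing n-element subset of range(o, m).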
def disect(We): #We is l-dimensional tensor, returns the sum along all but each axes.
w = []
As = We.shape
l = len(As)
for j in range(l):
if As[j] >1:
w.append(We.sum(tuple(range(1,l-j))))
else:
w.append(arr([1]))
We = We.sum(0)
for j in range(l):
if As[j] >1: #don't calculate the full sum rule over and over again on trivial boxes. (empty or full)
w[j] /= We
return w
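# e.g. disect(arr([[1, 2], [3, 4]])) returns [arr([0.3, 0.7]), arr([0.4, 0.6])]:
# the marginal sums along each axis, normalised by the total sum (here 10).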
def boxicles(n,deps): #puts n particles in boxes with maximal capacity deps. oo=None was last input
M = len(deps)
if n == 0:
yield [0 for _ in range(M)]
else:
for preput in boxicles(n-1,deps):
for k in range(M):
#postput = [a for a in preput]
#postput[k] += 1
#yield postput
if preput[k]<deps[k]:
yield [preput[a]+int(a==k) for a in range(M)]
if preput[k]:
break
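# e.g. list(boxicles(2, [1, 2])) == [[1, 1], [0, 2]]: every way to distribute 2
# particles over boxes of capacity 1 and 2; the inner break keeps each
# occupation pattern from being generated more than once.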
def TBA(T,c,chempot,givedens = True):
interpts = 101 #odd is best
bdys = 20
la = linspace(-bdys,bdys,interpts) #lambda
dla = la[1]-la[0]
ootp = 1/(2*pi)
lala = (la*la-chempot)/T
convo = dla*c/(pi*(c*c+(la[:,newaxis]-la[newaxis,:])**2))
tba = lambda eps: lala- eps +(convo*log(1+exp(-eps))[newaxis,:]).sum(axis=1)
exep = exp(fsolve(tba,zeros(interpts)))
ooexep = 1/(1+exep)
#plt.plot(la,ooexep)
tba2 = lambda rhop: rhop/(ootp+(convo*rhop[newaxis,:]).sum(axis=1))-ooexep
rhopsol = fsolve(tba2,0.15*ooexep)
#plt.plot(la,ooexep/rhopsol)
rhopsol -= min(rhopsol) #ensure non-negativity, despite numerical error
D = sum(rhopsol)*dla
if givedens:
return D
else:
rhot = ootp+(convo*rhopsol[newaxis,:]).sum(axis=1)
xi = [0]
for rj in rhot:
xi.append(xi[-1]+dla*rj)
xi = (arr(xi[1:])+arr(xi[:-1]))/2
xi -= xi[interpts//2]
return rhopsol/rhot, xi
def LL_gibbs(N,L,T,c,ss): #more closely recreates a gibbs ensemble from a finite set of states.
qngen = LL_thermal_disc(N,L,T,c,200*ss)
ensemble = []
pref = 2*pi/L
for qn in qngen:
aqn = arr(qn)
lam,_ = newtrap(aqn,L,c,aqn*pref)
ensemble.append([sum(lam*lam),qn])
ensemble.sort()
h=1
while h<len(ensemble):
if ensemble[h-1][0]==ensemble[h][0]:
if ensemble[h-1][1]==ensemble[h][1]:
ensemble.pop(h)
h += 1
energies = arr([e[0] for e in ensemble])
prolly = cumsum(exp(-energies/T))
prolly /= prolly[-1]
#plt.hist(energies,bins=linspace(0,150,100))
#plt.plot(prolly)
for _ in range(ss):
yield ensemble[argmax(prolly>uniform())][1]
def LL_thermal_disc(N,L,T,c,samplesize):
if N==0:
for _ in range(samplesize):
yield []
else:
dens = N/L
chempot = fsolve(lambda chemp: TBA(T,c,chemp)-dens,1)
#plt.plot([TBA(T,c,ch) for ch in linspace(-10,10,100)])
rhox,xi = TBA(T,c,chempot,False)
pref = 1/L
#dom = max(1000,T/L)
#print(xi[0],xi[-1])
nf = lambda k : smirt(xi,rhox,k)
#KX = linspace(-10,10,1000)
#plt.plot(KX,[nf(kx) for kx in KX])
#boundbox = int(fsolve(lambda bd: integrate.quad(nf,-bd*pref,bd*pref)[0]-0.99*dens,L/2)[0]+2) #find the Qn inside which 99.5% of the particles should be
boundbox = int(xi[-1]*L)
for _ in range(samplesize):
if N%2:
I = [0]
index = 1
else:
I = []
index = 0.5
sign = 1
newreject = []
while len(I) < N and index<boundbox:
ki = index*pref
if uniform()<nf(ki):
I.append(sign*index)
else:
newreject.append(sign*index)
if sign == 1:
sign = -1
else:
sign = 1
index += 1
while len(I) < N:
shuffle(newreject)
reject = newreject
shuffle(reject)
rejlen,rejind = len(reject),0
newreject = []
while len(I) < N and rejind<rejlen:
if uniform()<nf(pref*reject[rejind]):
I.append(reject[rejind])
else:
newreject.append(reject[rejind])
rejind +=1
if uniform()<0.5:
I = [-ii for ii in I]
yield sorted(I)
def smirt(x,y,a): # y(x) irregularly spaced, x in increasing order; a is the desired x coordinate: interpolate
n = len(y)-1
h = 0
if a<x[0] or a>x[-1]:
return 0
while x[h+1]<a and h<n:
h += 1
return y[h]+(y[h+1]-y[h])*(a-x[h])/(x[h+1]-x[h])
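# e.g. smirt([0, 1, 3], [0, 2, 4], 2) == 3.0 (linear interpolation between the
# points (1, 2) and (3, 4)); arguments outside [x[0], x[-1]] return 0.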
def fd_better_disc(N,L,T,samplesize):
pref = 2*pi/L
if L==0:
for _ in range(samplesize):
yield []
else:
beta = 0.5/T
#dom = max(1000,T/L)
dom = 100
dens = 2*pi*N/L
mu = fsolve(lambda moo: integrate.quad(lambda k: 1/(1+exp((k*k-moo)*beta)),-dom,dom)[0]-dens,0)[0]
nf = lambda k : 1/(1+exp((k*k-mu)*beta))
boundbox = int(fsolve(lambda bd: integrate.quad(nf,-bd*pref,bd*pref)[0]-0.99*dens,L/2)[0]+2) #find the Qn inside which 99.5% of the particles should be
for _ in range(samplesize):
if N%2:
I = [0]
index = 1
else:
I = []
index = 0.5
sign = 1
newreject = []
while len(I) < N and index<boundbox:
ki = index*pref
if uniform()<nf(ki):
I.append(sign*index)
else:
newreject.append(sign*index)
if sign == 1:
sign = -1
else:
sign = 1
index += 1
while len(I) < N:
shuffle(newreject)
reject = newreject
shuffle(reject)
rejlen,rejind = len(reject),0
newreject = []
while len(I) < N and rejind<rejlen:
if uniform()<nf(pref*reject[rejind]):
I.append(reject[rejind])
else:
newreject.append(reject[rejind])
rejind +=1
yield sorted(I)
def gaudmat2(lam,leng,c): #gaudin matrix, needed to solve for rapidities
Ljk = lam[:,newaxis]-lam
K = 2*c/(Ljk*Ljk+c*c)
if len(lam)==0:
return 1
return eye(len(lam))*leng+diag(sum(K))-K
def bethLL2(lam,II,leng,ooc): #Lieb liniger repulsive, ooc = 1/c
return 2*(arctan((lam[:,newaxis]-lam)*ooc).sum(axis=1))-2*pi*II+lam*leng
def newtrap(II,L,c,lamguess=False): #execution time scales linearly with number of iterations. Worth the reduction.
if len(II)==0:
        return arr([]),1 #match the two-value (lam, det) return used below, so callers can unpack either way
ooc = 1/c
if type(lamguess) != bool:
lam = lamguess + 0
else:
lam = arr([ai for ai in II],dtype=float)
tol = 10**-13*len(II)**0.5
res = bethLL2(lam,II,L,ooc)
iters = 0
gm = gaudmat2(lam,L,c)
while norm(res)>tol and iters<100:
lam += solve(gm,-res)
res = bethLL2(lam,II,L,ooc)
gm = gaudmat2(lam,L,c)
iters += 1
return lam,det(gm)#,iters
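# Usage sketch for newtrap (assumes the module-level numpy aliases arr, pi, norm, solve, det used
# above are in scope, as in the original file). Solves the Bethe equations for N=3 particles on a
# ring of length L=10 with coupling c=4, starting from the free-momentum guess 2*pi*I/L.
def _newtrap_example():
    II = arr([-1.0, 0.0, 1.0])          # quantum numbers for odd N are integers
    L, c = 10.0, 4.0
    lam, gdet = newtrap(II, L, c, lamguess=II*2*pi/L)
    return lam, gdet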
def getbackflows(lam,L,c):
N = len(lam)
return fsolve(dliv,eye(N).reshape(N*N),args=(lam,L,c)).reshape((N,N))
def dliv(G,lam,L,c):
N = len(lam)
gmat = G.reshape((N,N))
al = lam-lam[:,newaxis]
chi = 2*c/(c*c+al*al)
return (gmat*(L+chi.sum(axis=1)[:,newaxis])-chi.dot(gmat)-2*pi*eye(N)).reshape(N*N)
def sashaLLdata(opi):
samplesize = 50
maxcomplex = 400000
dopsi = False
if dopsi:
NLNL = [16,24]
target = 0.98
else:
NLNL = [15]
target = 12
for T in [6]: #[1,3,
for NL in NLNL:#[13,17,21]:
for c in [4]:
qngen = LL_thermal_disc(NL,NL,T,c,10*samplesize)
opi.append([T,NL,c,[]])
h = 0
rej = 0
while h < samplesize:
qnn = next(qngen)
aaa = LLstate(NL,T,4,c,NL,dopsi=dopsi,qn=qnn)
if aaa.complexity < maxcomplex and aaa.complexity>0: #could have overflow
kloek = clock()
h += 1
print(h,end=': ')
aaa.prep()
aaa.hilbert_search(target)
print('seconds elapsed',int(clock()-kloek))
opi[-1][-1].append([qnn,aaa.states_done,deepcopy(aaa.operator)])
if not dopsi:
print('erows',len(aaa.operator[0]),'pcols',max([len(aaa.operator[0][j]) for j in range(len(aaa.operator[0]))]))
else:
rej += 1
if not dopsi:
opi[-1].append((aaa.dP,aaa.dE,aaa.gsenergy))
print('Done with',[T,NL,c],'rejected:',rej)
if dopsi:
avg = [arr([opi[k][3][j][2] for j in range(samplesize)]).sum(axis=0)/samplesize for k in range(len(opi))]
return avg #run stringsdelete(opi,avg):
#else:
# return opi
def getaxes(dPdEgsE,dfactor,ecut,DSF2,gslam):
bump = 0#exp(-20)
for j in range(1):
workdsf = DSF2[j][:ecut[j],:]
orishape = workdsf.shape
boundsE = arr([-dPdEgsE[2],-dPdEgsE[2]+orishape[0]*dPdEgsE[1]])/(gslam[-1]**2)
boundsP =( orishape[1]/2*dPdEgsE[0])/gslam[-1]
rebinned = log(rebin(workdsf,tuple([orishape[k]//dfactor[j][k] for k in [0,1]]))+bump)
print(max([max(plu) for plu in rebinned]))
print(rebinned.shape)
EE = linspace(boundsE[0],boundsE[1],rebinned.shape[0])
PP = linspace(-boundsP,boundsP,rebinned.shape[1])
boxsize = (PP[1]-PP[0])*(EE[1]-EE[0])
rebinned -= log(boxsize)
plt.figure()
plt.imshow(rebinned)
plt.colorbar()
# np.savetxt('./hydrostat.csv', np.column_stack(( np.reshape(X, (lx*ly,1)), np.reshape(Y, (lx*ly,1)), np.reshape(T1, (lx*ly,1)) )), header='x, y, T', comments='# ',delimiter=',', newline='\n' )
#with open('DSF_T'+['1','3','6'][j]+'.dat', 'wb') as your_dat_file:
# your_dat_file.write(struct.pack('i'*len(rebinned), *rebinned))
#csvsurf('DSF_Tnew'+['1c','3c','6c'][j+2],rebinned,EE,PP)
return PP,EE
def combinedsf(dsflist):
maxE = 0
maxP = 0
for operator in dsflist:
for half in operator:
maxE = max(maxE,len(half))
for erow in half:
maxP = max(maxP,len(erow))
total = zeros((maxE,2*maxP))
for operator in dsflist:
for pj,half in enumerate(operator):
for ej,erow in enumerate(half):
for pjj,pcol in enumerate(erow):
pk = (1-pj)*(maxP-1-pjj)+pj*(maxP+pjj)
total[ej,pk] += pcol
return total/len(dsflist)
def rebin(oldarr, new_shape):
shop = (new_shape[0], oldarr.shape[0] // new_shape[0],new_shape[1], oldarr.shape[1] // new_shape[1])
return oldarr.reshape(shop).sum(-1).sum(1)
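# Block-sum sketch for rebin (illustrative): each output cell is the sum of one block of the input,
# here a 2 x 2 block. new_shape must divide oldarr.shape exactly, otherwise the reshape raises.
def _rebin_example():
    import numpy as np
    a = np.arange(24).reshape(4, 6)
    return rebin(a, (2, 3))   # shape (2, 3); e.g. first cell = 0 + 1 + 6 + 7 = 14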
def averageopi(taui):
avtau = []
xpts = 201
place = linspace(0,10,xpts) #change back to symmetric -10,10?
rpts = 5
ray = linspace(0,0.4,rpts)
raytime = kron(place,ray).reshape((xpts,rpts))
for tau in taui:
avtau.append(tau[:3])
op = 0
N = len(tau[3][0][0])
Place = 1j*place[:,newaxis]
Time = -0.5j*raytime
for state in tau[3]:
lam,_ = newtrap(arr(state[0]),N,4,arr(state[0])*2*pi/N)
energy = sum(lam*lam)
momi = sum(lam)
op += state[1]*exp(-momi*Place-energy*Time)
avtau[-1].append(op/len(tau[3]))
return avtau
def plotavopi(avopi):
ru=0
xx = linspace(0,10,201)
cols = ['firebrick','darkorange','forestgreen','dodgerblue','purple']
bgs = ['lightcoral','bisque','lightgreen','lightskyblue','plum']
j=0
for avo in avopi[ru::]:
error = 1-abs(avo[3][0][0])
plt.fill_between(xx, abs(avo[3][:,0])-error, abs(avo[3][:,0])+error,facecolor=bgs[j],edgecolor=cols[j],alpha=0.2)
j+=1
j=0
for avo in avopi[ru::]:
plt.plot(xx,abs(avo[3][:,0]),label='[T, N]='+str(avo[:2]),c=cols[j])
j+=1
plt.legend()
plt.gcf().set_size_inches(5,4)
plt.title('Thermal Lieb-Liniger '+r'$|\langle\psi(x)\psi(0)\rangle|; c=$'+str(avopi[0][2]))
plt.xlabel('x')
plt.ylabel(r'$|\langle\psi\psi\rangle|$')
plt.savefig('PsipsiLLc'+str(avopi[0][2])+'.png',dpi=500)
def LLpsistats(L,T,ss,etarget,ptarget,res,pez,ez):
erange = 1
prange = 1
c = 4
B = 4
wish = 0.9
count = 0
QNs = []
qngen = fd_better_disc(L,L,T,1000000)
while count < ss:
qnow = next(qngen)
if qnow not in QNs:
QNs.append(qnow)
aaa= LLstate(L,T,B,c,L,qn=qnow)
mom = sum(aaa.lam)
go = mom>ptarget-prange and mom<ptarget+prange and aaa.energy > etarget-erange and aaa.energy < etarget+erange
if aaa.complexity<1000000 and go:
count += 1
print('count=',count)
aaa.prep()
aaa.hilbert_search(wish)
res.append(deepcopy(aaa.operator))
{}),
("__subclasscheck__", do_issubclass, return_true,
set(("__bases__",)), {}),
("__enter__", run_context, iden, set(), {"__exit__" : swallow}),
("__exit__", run_context, swallow, set(), {"__enter__" : iden}),
("__complex__", complex, complex_num, set(), {}),
("__format__", format, format_impl, set(), {}),
("__floor__", math.floor, zero, set(), {}),
("__trunc__", math.trunc, zero, set(), {}),
("__trunc__", int, zero, set(), {}),
("__ceil__", math.ceil, zero, set(), {}),
("__dir__", dir, empty_seq, set(), {}),
("__round__", round, zero, set(), {}),
]
        class Checker(object):
            def __getattr__(self, attr, test=self):
                test.fail("__getattr__ called with {0}".format(attr))
            def __getattribute__(self, attr, test=self):
                if attr not in ok:
                    test.fail("__getattribute__ called with {0}".format(attr))
                return object.__getattribute__(self, attr)
        class SpecialDescr(object):
            def __init__(self, impl):
                self.impl = impl
            def __get__(self, obj, owner):
                record.append(1)
                return self.impl.__get__(obj, owner)
        class MyException(Exception):
            pass
        class ErrDescr(object):
            def __get__(self, obj, owner):
                raise MyException
        for name, runner, meth_impl, ok, env in specials:
            class X(Checker):
                pass
            for attr, obj in env.items():
                setattr(X, attr, obj)
            setattr(X, name, meth_impl)
            runner(X())
            record = []
            class X(Checker):
                pass
            for attr, obj in env.items():
                setattr(X, attr, obj)
            setattr(X, name, SpecialDescr(meth_impl))
            runner(X())
            self.assertEqual(record, [1], name)
            class X(Checker):
                pass
            for attr, obj in env.items():
                setattr(X, attr, obj)
            setattr(X, name, ErrDescr())
            self.assertRaises(MyException, runner, X())
    def test_specials(self):
        # Testing special operators...
        # Test operators like __hash__ for which a built-in default exists
        # Test the default behavior for static classes
        class C(object):
            def __getitem__(self, i):
                if 0 <= i < 10: return i
                raise IndexError
        c1 = C()
        c2 = C()
        self.assertFalse(not c1)
        self.assertNotEqual(id(c1), id(c2))
        hash(c1)
        hash(c2)
        self.assertEqual(c1, c1)
        self.assertTrue(c1 != c2)
        self.assertFalse(c1 != c1)
        self.assertFalse(c1 == c2)
        # Note that the module name appears in str/repr, and that varies
        # depending on whether this test is run standalone or from a framework.
        self.assertGreaterEqual(str(c1).find('C object at '), 0)
        self.assertEqual(str(c1), repr(c1))
        self.assertNotIn(-1, c1)
        for i in range(10):
            self.assertIn(i, c1)
        self.assertNotIn(10, c1)
        # Test the default behavior for dynamic classes
        class D(object):
            def __getitem__(self, i):
                if 0 <= i < 10: return i
                raise IndexError
        d1 = D()
        d2 = D()
        self.assertFalse(not d1)
        self.assertNotEqual(id(d1), id(d2))
        hash(d1)
        hash(d2)
        self.assertEqual(d1, d1)
        self.assertNotEqual(d1, d2)
        self.assertFalse(d1 != d1)
        self.assertFalse(d1 == d2)
        # Note that the module name appears in str/repr, and that varies
        # depending on whether this test is run standalone or from a framework.
        self.assertGreaterEqual(str(d1).find('D object at '), 0)
        self.assertEqual(str(d1), repr(d1))
        self.assertNotIn(-1, d1)
        for i in range(10):
            self.assertIn(i, d1)
        self.assertNotIn(10, d1)
        # Test overridden behavior
        class Proxy(object):
            def __init__(self, x):
                self.x = x
            def __bool__(self):
                return not not self.x
            def __hash__(self):
                return hash(self.x)
            def __eq__(self, other):
                return self.x == other
            def __ne__(self, other):
                return self.x != other
            def __ge__(self, other):
                return self.x >= other
            def __gt__(self, other):
                return self.x > other
            def __le__(self, other):
                return self.x <= other
            def __lt__(self, other):
                return self.x < other
            def __str__(self):
                return "Proxy:%s" % self.x
            def __repr__(self):
                return "Proxy(%r)" % self.x
            def __contains__(self, value):
                return value in self.x
        p0 = Proxy(0)
        p1 = Proxy(1)
        p_1 = Proxy(-1)
        self.assertFalse(p0)
        self.assertFalse(not p1)
        self.assertEqual(hash(p0), hash(0))
        self.assertEqual(p0, p0)
        self.assertNotEqual(p0, p1)
        self.assertFalse(p0 != p0)
        self.assertEqual(not p0, p1)
        self.assertTrue(p0 < p1)
        self.assertTrue(p0 <= p1)
        self.assertTrue(p1 > p0)
        self.assertTrue(p1 >= p0)
        self.assertEqual(str(p0), "Proxy:0")
        self.assertEqual(repr(p0), "Proxy(0)")
        p10 = Proxy(range(10))
        self.assertNotIn(-1, p10)
        for i in range(10):
            self.assertIn(i, p10)
        self.assertNotIn(10, p10)
    def test_weakrefs(self):
        # Testing weak references...
        import weakref
        class C(object):
            pass
        c = C()
        r = weakref.ref(c)
        self.assertEqual(r(), c)
        del c
        support.gc_collect()
        self.assertEqual(r(), None)
        del r
        class NoWeak(object):
            __slots__ = ['foo']
        no = NoWeak()
        try:
            weakref.ref(no)
        except TypeError as msg:
            self.assertIn("weak reference", str(msg))
        else:
            self.fail("weakref.ref(no) should be illegal")
        class Weak(object):
            __slots__ = ['foo', '__weakref__']
        yes = Weak()
        r = weakref.ref(yes)
        self.assertEqual(r(), yes)
        del yes
        support.gc_collect()
        self.assertEqual(r(), None)
        del r
    def test_properties(self):
        # Testing property...
        class C(object):
            def getx(self):
                return self.__x
            def setx(self, value):
                self.__x = value
            def delx(self):
                del self.__x
            x = property(getx, setx, delx, doc="I'm the x property.")
        a = C()
        self.assertNotHasAttr(a, "x")
        a.x = 42
        self.assertEqual(a._C__x, 42)
        self.assertEqual(a.x, 42)
        del a.x
        self.assertNotHasAttr(a, "x")
        self.assertNotHasAttr(a, "_C__x")
        C.x.__set__(a, 100)
        self.assertEqual(C.x.__get__(a), 100)
        C.x.__delete__(a)
        self.assertNotHasAttr(a, "x")
        raw = C.__dict__['x']
        self.assertIsInstance(raw, property)
        attrs = dir(raw)
        self.assertIn("__doc__", attrs)
        self.assertIn("fget", attrs)
        self.assertIn("fset", attrs)
        self.assertIn("fdel", attrs)
        self.assertEqual(raw.__doc__, "I'm the x property.")
        self.assertIs(raw.fget, C.__dict__['getx'])
        self.assertIs(raw.fset, C.__dict__['setx'])
        self.assertIs(raw.fdel, C.__dict__['delx'])
        for attr in "fget", "fset", "fdel":
            try:
                setattr(raw, attr, 42)
            except AttributeError as msg:
                if str(msg).find('readonly') < 0:
                    self.fail("when setting readonly attr %r on a property, "
                              "got unexpected AttributeError msg %r" % (attr, str(msg)))
            else:
                self.fail("expected AttributeError from trying to set readonly %r "
                          "attr on a property" % attr)
        raw.__doc__ = 42
        self.assertEqual(raw.__doc__, 42)
        class D(object):
            __getitem__ = property(lambda s: 1/0)
        d = D()
        try:
            for i in d:
                str(i)
        except ZeroDivisionError:
            pass
        else:
            self.fail("expected ZeroDivisionError from bad property")
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_properties_doc_attrib(self):
        class E(object):
            def getter(self):
                "getter method"
                return 0
            def setter(self_, value):
                "setter method"
                pass
        prop = property(getter)
        self.assertEqual(prop.__doc__, "getter method")
        prop2 = property(fset=setter)
        self.assertEqual(prop2.__doc__, None)
    @support.cpython_only
    def test_testcapi_no_segfault(self):
        # this segfaulted in 2.5b2
        try:
            import _testcapi
        except ImportError:
            pass
        else:
            class X(object):
                p = property(_testcapi.test_with_docstring)
    def test_properties_plus(self):
        class C(object):
            foo = property(doc="hello")
            @foo.getter
            def foo(self):
                return self._foo
            @foo.setter
            def foo(self, value):
                self._foo = abs(value)
            @foo.deleter
            def foo(self):
                del self._foo
        c = C()
        self.assertEqual(C.foo.__doc__, "hello")
        self.assertNotHasAttr(c, "foo")
        c.foo = -42
        self.assertHasAttr(c, '_foo')
        self.assertEqual(c._foo, 42)
        self.assertEqual(c.foo, 42)
        del c.foo
        self.assertNotHasAttr(c, '_foo')
        self.assertNotHasAttr(c, "foo")
        class D(C):
            @C.foo.deleter
            def foo(self):
                try:
                    del self._foo
                except AttributeError:
                    pass
        d = D()
        d.foo = 24
        self.assertEqual(d.foo, 24)
        del d.foo
        del d.foo
        class E(object):
            @property
            def foo(self):
                return self._foo
            @foo.setter
            def foo(self, value):
                raise RuntimeError
            @foo.setter
            def foo(self, value):
                self._foo = abs(value)
            @foo.deleter
            def foo(self, value=None):
                del self._foo
        e = E()
        e.foo = -42
        self.assertEqual(e.foo, 42)
        del e.foo
        class F(E):
            @E.foo.deleter
            def foo(self):
                del self._foo
            @foo.setter
            def foo(self, value):
                self._foo = max(0, value)
        f = F()
        f.foo = -10
        self.assertEqual(f.foo, 0)
        del f.foo
    def test_dict_constructors(self):
        # Testing dict constructor ...
        d = dict()
        self.assertEqual(d, {})
        d = dict({})
        self.assertEqual(d, {})
        d = dict({1: 2, 'a': 'b'})
        self.assertEqual(d, {1: 2, 'a': 'b'})
        self.assertEqual(d, dict(list(d.items())))
        self.assertEqual(d, dict(iter(d.items())))
        d = dict({'one':1, 'two':2})
        self.assertEqual(d, dict(one=1, two=2))
        self.assertEqual(d, dict(**d))
        self.assertEqual(d, dict({"one": 1}, two=2))
        self.assertEqual(d, dict([("two", 2)], one=1))
        self.assertEqual(d, dict([("one", 100), ("two", 200)], **d))
        self.assertEqual(d, dict(**d))
        for badarg in 0, 0, 0j, "0", [0], (0,):
            try:
                dict(badarg)
            except TypeError:
                pass
            except ValueError:
                if badarg == "0":
                    # It's a sequence, and its elements are also sequences (gotta
                    # love strings <wink>), but they aren't of length 2, so this
                    # one seemed better as a ValueError than a TypeError.
                    pass
                else:
                    self.fail("no TypeError from dict(%r)" % badarg)
            else:
                self.fail("no TypeError from dict(%r)" % badarg)
        try:
            dict({}, {})
        except TypeError:
            pass
        else:
            self.fail("no TypeError from dict({}, {})")
        class Mapping:
            # Lacks a .keys() method; will be added later.
            dict = {1:2, 3:4, 'a':1j}
        try:
            dict(Mapping())
        except TypeError:
            pass
        else:
            self.fail("no TypeError from dict(incomplete mapping)")
        Mapping.keys = lambda self: list(self.dict.keys())
        Mapping.__getitem__ = lambda self, i: self.dict[i]
        d = dict(Mapping())
        self.assertEqual(d, Mapping.dict)
        # Init from sequence of iterable objects, each producing a 2-sequence.
        class AddressBookEntry:
            def __init__(self, first, last):
                self.first = first
                self.last = last
            def __iter__(self):
                return iter([self.first, self.last])
        d = dict([AddressBookEntry('Tim', 'Warsaw'),
                  AddressBookEntry('Barry', 'Peters'),
                  AddressBookEntry('Tim', 'Peters'),
                  AddressBookEntry('Barry', 'Warsaw')])
        self.assertEqual(d, {'Barry': 'Warsaw', 'Tim': 'Peters'})
        d = dict(zip(range(4), range(1, 5)))
        self.assertEqual(d, dict([(i, i+1) for i in range(4)]))
        # Bad sequence lengths.
        for bad in [('tooshort',)], [('too', 'long', 'by 1')]:
            try:
                dict(bad)
            except ValueError:
                pass
            else:
                self.fail("no ValueError from dict(%r)" % bad)
    def test_dir(self):
        # Testing dir() ...
        junk = 12
        self.assertEqual(dir(), ['junk', 'self'])
        del junk
        # Just make sure these don't blow up!
        for arg in 2, 2, 2j, 2e0, [2], "2", b"2", (2,), {2:2}, type, self.test_dir:
            dir(arg)
        # Test dir on new-style classes.  Since these have object as a
        # base class, a lot more gets sucked in.
        def interesting(strings):
            return [s for s in strings if not s.startswith('_')]
        class C(object):
            Cdata = 1
            def Cmethod(self): pass
        cstuff = ['Cdata', 'Cmethod']
        self.assertEqual(interesting(dir(C)), cstuff)
        c = C()
        self.assertEqual(interesting(dir(c)), cstuff)
        ## self.assertIn('__self__', dir(C.Cmethod))
        c.cdata = 2
        c.cmethod = lambda self: 0
username, bookname):
return folders(request, username, bookname, '')
#TODO: get the snippets' folder?
@login_required
def folders(request, username, bookname, folder_name):
#TODO: don't query the notes if no folder name. Also get rid of using /notes/folders// in url and use /notes/folders/
F = getFolder(username, bookname)
N = getNote(username, bookname)
T = getT(username)
note_list = N.objects.all()
if request.user.username != username:
log.debug( 'Not the owner of the notes requested, getting public notes only...')
note_list = get_public_notes(note_list)
qstr = ""
if folder_name:
folder = F.objects.get(name=folder_name)
qstr = folder.value
#request_path = '/'+username+'/notes/?q='+django.utils.http.urlquote_plus(qstr)
note_list = getSearchResults(note_list, qstr)
#TODO: no need of below in folders aspect
#default_tag_id = T.objects.get(name='untagged').id
context = __get_context(request, note_list, #default_tag_id,
username, bookname)
folders = context.get('folders') #TODO: if folders is empty
folder_values, is_in_folders, current_folder = __get_folder_context(folders, qstr)
extra_context = {'qstr':qstr,'folder_values':folder_values, 'is_in_folders':is_in_folders, 'current_folder':current_folder, 'aspect_name':'folders'}
context.update(extra_context)
return render_to_response(book_template_dict.get(bookname)+'folders.html', context, context_instance=RequestContext(request,{'bookname': bookname,
'note_type':bookname_note_type_dict.get(bookname),
'book_uri_prefix':'/'+username,
'profile_member':Member.objects.get(username=username)}))
#TODO:add protection
#below is copied from note_raw except using a different template page
@login_required
@cache_page(30)
def note(request, username, bookname, note_id):
log.debug('Getting the note:'+note_id)
#TODO: make this into decorator
#===============================================================================
# if username != request.user.username:
# raise Http404
#===============================================================================
if 'framebook' == bookname:
return frame(request, username, note_id)
N = getNote(username, bookname)
note = get_object_or_404(N, pk=note_id)
#linkages = note.linkagenote_set.all()
notes_included = None
if note.get_note_type() == 'Frame':
notes_included = note.frame.notes.using(username).all()
UpdateNForm = create_model_form("UpdateNForm_"+str(username), N, fields={'tags':forms.ModelMultipleChoiceField(queryset=__get_ws_tags(request, username, bookname))})
note_form = UpdateNForm(instance=note)
N_T = getNoteTranslation(username)
UpdateNoteTransForm = create_model_form("UpdateNoteTransForm_"+str(username), N_T)
if not note.get_lang():
note_trans_form = UpdateNoteTransForm()
else:
note_trans = Note_Translation.objects.using(username).get(note=note)
note_trans_form = UpdateNoteTransForm(instance=note_trans)
tags = __get_ws_tags(request, username, bookname)
pick_lang = __get_lang(request)
return render_to_response(book_template_dict.get(bookname)+'notes/note/note.html', {'note':note, 'notes_included':notes_included, \
'note_form':note_form, 'profile_username':username, \
'note_trans_form':note_trans_form,\
'pick_lang':pick_lang, 'tags':tags,
'pagename':'note', 'appname':'notes' #TODO: in the future, get the app name from the app the view is included
},
context_instance=RequestContext(request, {'bookname': bookname,\
'aspect_name':'notes',\
'book_uri_prefix':'/'+username,
'profile_member':Member.objects.get(username=username) }))
import django
def __get_lang(request):
pick_lang = request.GET.get('pick_lang')
if not pick_lang:
current_active_lang = django.utils.translation.get_language()
#print 'current_active_lang', current_active_lang
if current_active_lang == 'zh-cn':
pick_lang = 'C'
else:
pick_lang = 'E'
return pick_lang
#TODO:add protection
@login_required
def note_raw(request, username, bookname, note_id):
log.debug('Getting the note:'+note_id)
N = getNote(username, bookname)
note = N.objects.get(id=note_id)
linkages = note.linkagenote_set.all()
UpdateNForm = create_model_form("UpdateNForm_"+str(username), N, fields={'tags':forms.ModelMultipleChoiceField(queryset=__get_ws_tags(request, username, bookname))})
note_form = UpdateNForm(instance=note)
return render_to_response(book_template_dict.get(bookname)+'notes/note_raw.html', {'note':note, 'linkages':linkages,'note_form':note_form, 'profile_username':username}, context_instance=RequestContext(request, {'bookname': bookname,'aspect_name':'notes'}))
#below is not used
#def alltags(request, username):
# note_list = Note.objects.filter(delete=False).order_by('-init_date')
# return render_to_response('notes/note_list_by_tag.html', {'note_list': note_list, 'tags':Tag.objects.all(),
# 'current_tag':'','view_mode':view_mode, 'sort':sort})
@login_required
def update_note(request, note_id, username, bookname):
#with the current url.py inclusion, username will have to be passed in the parameters. But inside the method, we choose not to use it at all
#so we make sure the requesting user is only changing his own content
log.debug( 'updating note :'+note_id)
#N = getNote(username, bookname) #TODO:isn't this better?
# note = N.objects.get(id=note_id)
note = Note.objects.using(request.user.username).get(id=note_id)
#TODO: probably there is no need with the complicated dynamic class generation anymore. Just use the way below
note.owner_name = request.user.username
note.title = request.POST.get('title')
note.desc = request.POST.get('desc')
note.event = request.POST.get('event')
note.private = request.POST.get('private', False)
note.deleted = request.POST.get('delete', False)
#note.init_date = request.POST.get('init_date')
if note.get_note_type() == 'Frame':
note.frame.owner_name = username
note.vote = note.frame.get_vote()
note.tags = note.frame.get_sum_of_note_tag_ids()
else:
note.vote = request.POST.get('vote')
#note.tags = request.POST.getlist('tags')
url = request.POST.get('url')
if url:
if note.get_note_type() == 'Bookmark':
note.bookmark.owner_name = username
note.bookmark.url = url
note.bookmark.save()
if note.get_note_type() == 'Scrap':
note.scrap.owner_name = username
note.scrap.url = url
note.scrap.save()
file = request.FILES.get('attachment')
attachment_clear = request.POST.get('attachment-clear')
if file:
full_name = get_storage_loc(note, file.name)
if len(full_name) > 100:
messages.error(request, _("Error uploading the file. The file name is too long! Please use a shorter file name. You can reduce your file name by ")+str((len(full_name)-100))+_(' letters.')) #TODO
return HttpResponseRedirect(request.META.get('HTTP_REFERER','/'))
#TODO: validate the uploaded file for security
#TODO:check the official way of using attachment-clear field or ClearableFileInput
if file or attachment_clear:
if note.get_note_type() == 'Frame':
note.frame.owner_name = username
note.frame.attachment = file
#have to call the subclass' save method since the parent doesn't know about attachment
note.frame.save()
if note.get_note_type() == 'Snippet':
note.snippet.owner_name = username
note.snippet.attachment = file
note.snippet.save()
note.save()
log.debug( 'the note %s is updated.' % (note_id))
full_path = request.get_full_path()
pre_url = full_path[:-11] #remove "addNote/"
log.debug( 'redirect to the page that add note form is submitted from:'+pre_url)
messages.success(request, _("Note is successfully updated!")) #TODO
return HttpResponseRedirect(request.META.get('HTTP_REFERER','/'))
@login_required
def update_note_trans(request, note_id, username, bookname):
#note_id is the id of the original note
N = getNote(request.user.username, bookname)
note = N.objects.get(id=note_id)
#trans, created = Note_Translation.objects.using(username).get_or_create(note=note)
title = request.POST.get('title')
desc = request.POST.get('desc')
lang = request.POST.get('lang')
original_lang = request.POST.get('original_lang')
note.set_translation(original_lang, lang, title, desc)
note.save()
#trans.owner_name = username
#trans.save()
#TODO: use below to replace all return HttpResponseRedirect(__get_pre_url(request))
return HttpResponseRedirect(request.META.get('HTTP_REFERER','/'))
#set the order for the notes in a frame. So this only is supposed to be for framebook
@login_required
def set_notes_order_in_frame(request, note_id, username, bookname):
log.debug( 'setting the order of the notes in a frame:'+note_id)
ordered_notes = request.GET.get('ordered_notes').split(',')
#print 'ordered_notes:',ordered_notes
N = getNote(request.user.username, bookname)
f = N.objects.get(id=note_id)
f.set_notes_order(ordered_notes)
return HttpResponse('successful', mimetype="text/plain")
@login_required
def update_note_inline(request, username, bookname):
note_id = request.POST.get('id')
content = request.POST.get('content')
note_field = request.POST.get('note_field')
N = getNote(request.user.username, bookname)
note = N.objects.get(id=note_id)
if note_field=='note_title':
note.title = content
if note_field=='note_desc':
note.desc = content
if note_field=='note_event':
note.event = content.lstrip("{").rstrip("}") #TODO: change Note's event to extra, the same as bookmarks and scraps
#below is not used anymore. Instead, update_note_tags_inline is used.
if note_field=='note_tags':
note.update_tags(content)
#note.tags = content
if note_field=='note_init_date':
note.init_date = datetime.datetime.strptime(content,'%Y-%m-%d %H:%M')
#below is not used anymore. update_note_included_notes_inline is used to add notes instead
if note_field=='note_add_notes':
note.add_notes(content)
note.save()
log.debug( 'note updated')
#TODO: if error
return HttpResponse(content, mimetype="text/plain")
@login_required
def update_note_tags_inline(request, username, bookname):
note_id = request.POST.get('id')
tags = request.POST.get('tags')
#strip away special tags that should not be changed by the user
#it is already enforced at db level. Code below can be removed TODO:
tags_clean = [tag for tag in tags.split(',') if not tag.startswith('takenfrom:')]
N = getNote(request.user.username, bookname)
note = N.objects.get(id=note_id)
note.update_tags(','.join(tags_clean))
note.save()
return HttpResponse(simplejson.dumps({'note_id':note.id, 'display_tags':note.display_tags(),\
'note_tags':note.get_tags()}),
"application/json")
#TODO: after adding notes to a frame, should we automatically save the current ordering or pop a window asking the user to save the ordering?
#If not doing either and the current frame already has an ordering, the newly added notes will make the ordering not functioning. See get_notes_order in Frame model.
@login_required
def add_notes_to_frame(request, username, bookname):
note_id = request.POST.get('id')
included_notes_added = request.POST.get('included_notes_added')
N = getNote(request.user.username, bookname)
note = N.objects.get(id=note_id)
try:
#TODO: remove the note itself and note that is already included
notes_to_add = __get_notes_by_ids(included_notes_added.split(','), request.user.username, 'notebook')
except (UnicodeEncodeError, ValueError):
return HttpResponse(simplejson.dumps({'type':'error', 'msg':_('Please enter a note id or comma separated note ids in the box!'), 'result':{'note_id':note.id}}),
"application/json")
#TODO: find or write function that mimic truncatewords in template
notes_to_add_clean = [n for n in notes_to_add if n.id != note.id and n.id not in note.notes.values_list('id', flat=True)]
notes_to_add_clean_return = [[n.id, n.title, n.desc[0:200], n.vote, n.get_note_bookname(), n.get_note_type()] for n in notes_to_add_clean]
notes_to_add_clean_str = ','.join([str(n.id) for n in notes_to_add_clean])
note.add_notes(notes_to_add_clean_str)
note.owner_name = request.user.username
note.vote = note.get_vote()
note.tags = note.get_sum_of_note_tag_ids()
note.save()
return HttpResponse(simplejson.dumps({'type':'success', 'result':{'note_id':note.id, 'notes_added':notes_to_add_clean_return}}),
"application/json")
def create_note_in_frame(request, username, bookname):
note_id = request.POST.get('id')
note_created_desc = request.POST.get('note_created_desc')
#bookname should always be framebook here
N = getNote(request.user.username, bookname)
frame = N.objects.get(id=note_id)
#for now, only create a snippet directly inside a frame. Think of creating bookmark or scrap later TODO:
N_To_Include = getNote(request.user.username, 'snippetbook')
#there might be multiple notes with the same desc. If so, just get the first one for now. TODO:
if N_To_Include.objects.filter(desc=note_created_desc).count() > 1:
note_to_include = N_To_Include.objects.filter(desc=note_created_desc)[0]
else:
note_to_include, created = N_To_Include.objects.get_or_create(desc=note_created_desc)
T = getT(request.user.username)
if frame.title.startswith('Weekly Plan:'):
t1, t1_created = T.objects.get_or_create(name='weekly')
t2, t2_created = T.objects.get_or_create(name='plan')
note_to_include.tags.add(t1)
note_to_include.tags.add(t2)
note_to_include.save()
if frame.title.startswith('Monthly Plan:'):
t1, t1_created = T.objects.get_or_create(name='monthly')
t2, t2_created = T.objects.get_or_create(name='plan')
note_to_include.tags.add(t1)
note_to_include.tags.add(t2)
note_to_include.save()
    #=======think of getting rid of untagged
the default in Chimera (Petterson et al, 2004);
0.425R the fullwidth half maximum being equal to the resolution, as used by FlexEM (Topf et al, 2008);
0.5R the distance between the two inflection points being the same length as the resolution, an option in Chimera (Petterson et al, 2004);
            1R where the sigma value is simply equal to the resolution, as used by NMFF (Tama et al, 2004).
Return:
Dictionary of smoc scores for residues in the chain
"""
blurrer = StructureBlurrer()
sim_map = blurrer.gaussian_blur_real_space(structure_instance, resolution_densMap,densMap=map_target,sigma_coeff=sigma_map,normalise=True)
peak,ave,sigma = sim_map._peak_density()
#NOTE: filter background
sim_map.fullMap = sim_map.fullMap*(sim_map.fullMap > peak)
dict_chain_indices,dict_chain_res, dict_res_dist = blurrer.get_indices(structure_instance,map_target,resolution_densMap,sigma_map)
#get details of map
origin = map_target.origin
apix = map_target.apix
box_size = map_target.box_size()
nz,ny,nx = map_target.fullMap.shape
zg,yg,xg = mgrid[0:nz,0:ny,0:nx]
indi = list(zip(xg.ravel(), yg.ravel(), zg.ravel()))
#save rigid body details
dict_rf_res = {}
dict_rf_sc = {}
res_list = []
rb_list = []
list_sccc = []
#save scores for each chain and res
dict_chain_scores = {}
#TODO: add multi-chain rigid body parser below
'''
r_ct = 0
if rigid_body_file != None:
inp = open(rigid_body_file,'r')
for l in inp:
if l[0] != '#':
score_indices = []
lrb = l.split()
if len(lrb) == 0: continue
r_ct += 1
res_list = []
rb_pairs = []
# get scores for each res and each rigid body
for i in range(max((len(lrb)/2)-1,1)):
rb_pairs.append([int(lrb[2*i]),int(lrb[2*i+1])])
# NOTE: wont work for insertion codes
for r in range(int(lrb[2*i]),int(lrb[2*i+1])+1):
score_indices.extend(dict_res_indices[r])
res_list.append(r)
rb_list.append(lrb)
dict_rf_res[r_ct] = rb_pairs
if len(score_indices) == 0:
dict_rf_sc[r_ct] = 0.0#-0.99
for res in res_list: dict_res_scores[res] = 0.0#-0.99
continue
tmplist = score_indices[:]
setlist = set(tmplist)
score_indices = list(setlist)
sc_indices = []
for ii in score_indices: sc_indices.append(indi[ii])
array_indices = nparray(sc_indices)
ind_arrxyz = transpose(array_indices)
# get indices for use with map arrays: ([z...],[y...],x...])
ind_arrzyx = (ind_arrxyz[2],ind_arrxyz[1],ind_arrxyz[0])
sccc = self.calc_moc(ind_arrzyx,sim_map,map_target)
dict_rf_sc[r_ct] = sccc
#save scores
for res in res_list:
dict_res_scores[res] = sccc
list_sccc.append(sccc)
inp.close()
'''
#for residues not in rigid bodies: consider pentapeptides
for ch in dict_chain_indices:
dict_res_scores = {}
dict_res_indices = dict_chain_indices[ch]
for res in dict_res_indices:
if not res in dict_res_scores.keys():
indices = dict_res_indices[res][:]
                    #consider residues on both sides. NOTE: won't work for insertion codes!
                    #need to rewrite res numbers to avoid insertion codes
for ii in range(1,int(round((win+1)/2))):
try:
#get prev residue indices
indices.extend(dict_res_indices[dict_chain_res[ch][dict_chain_res[ch].index(res)-ii]])
except: pass
for ii in range(1,int(round((win+1)/2))):
try:
indices.extend(dict_res_indices[dict_chain_res[ch][dict_chain_res[ch].index(res)+ii]])
except: pass
tmplist = indices[:]
setlist = set(tmplist)
indices = list(setlist)
sc_indices = []
for ii in indices: sc_indices.append(indi[ii])
if len(indices) < 10:
try:
dict_res_scores[res] = dict_res_scores[dict_chain_res[ch][dict_chain_res[ch].index(res)-1]]
try: dict_res_scores[res] = (dict_res_scores[res]+dict_res_scores[dict_chain_res[ch][dict_chain_res[ch].index(res)+1]])/2.0
except (IndexError,KeyError): pass
except (IndexError,KeyError):
try: dict_res_scores[res] = dict_res_scores[dict_chain_res[ch][dict_chain_res[ch].index(res)+1]]
except (IndexError,KeyError): dict_res_scores[res] = 0.0
continue
array_indices = nparray(sc_indices)
ind_arrxyz = transpose(array_indices)
ind_arrzyx = (ind_arrxyz[2],ind_arrxyz[1],ind_arrxyz[0])
sccc = self.calc_moc(ind_arrzyx,sim_map,map_target)
dict_res_scores[res] = sccc
if sccc == -1.0:
try:
dict_res_scores[res] = dict_res_scores[dict_chain_res[ch][dict_chain_res[ch].index(res)-1]]
try: dict_res_scores[res] = (dict_res_scores[res]+dict_res_scores[dict_chain_res[ch][dict_chain_res[ch].index(res)+1]])/2.0
except (IndexError,KeyError): pass
except IndexError:
try: dict_res_scores[res] = dict_res_scores[dict_chain_res[ch][dict_chain_res[ch].index(res)+1]]
except (IndexError,KeyError): dict_res_scores[res] = 0.0
continue
list_sccc.append(sccc)
dict_chain_scores[ch] = dict_res_scores
return dict_chain_scores, dict_chain_res
def _SMOC1(self,map_target,resolution_densMap,structure_instance,win=11,rigid_body_file=None,sigma_map=0.225,write=False):
"""
Calculate Local cross correlation (Mander's Overlap)
It is a local Overlap Coefficient calculated on atoms in sliding residue windows along the chain.
Arguments:
*map_target*
Target Map Instance.
*resolution_densMap*
Parameter need for Structure Blurrer.
Resolution of the target map.
*structure_instance*
Model structure instance.
*win*
Overlapping Window length to calculate the score
*rigid_body_file*
Rigid-body file.
*sigma_map*
Parameter need for Structure Blurrer.
The sigma value (multiplied by the resolution) that controls the width of the Gaussian.
            Default value is 0.356.
Other values used :
0.187R corresponding with the Gaussian width of the Fourier transform falling to half the maximum at 1/resolution, as used in Situs (Wriggers et al, 1999);
0.356R corresponding to the Gaussian width at 1/e maximum height equaling the resolution, the default in Chimera (Petterson et al, 2004);
0.425R the fullwidth half maximum being equal to the resolution, as used by FlexEM (Topf et al, 2008);
0.5R the distance between the two inflection points being the same length as the resolution, an option in Chimera (Petterson et al, 2004);
            1R where the sigma value is simply equal to the resolution, as used by NMFF (Tama et al, 2004).
Return:
Dictionary of smoc scores for residues in the chain
"""
blurrer = StructureBlurrer()
sim_map = blurrer.gaussian_blur_real_space(structure_instance, resolution_densMap,densMap=map_target,sigma_coeff=sigma_map,normalise=True)
peak,ave,sigma = sim_map._peak_density()
#NOTE: filter background
sim_map.fullMap = sim_map.fullMap*(sim_map.fullMap > peak)
dict_res_indices,dict_res_dist = blurrer.get_indices(structure_instance,map_target,resolution_densMap)
#get details of map
origin = map_target.origin
apix = map_target.apix
box_size = map_target.box_size()
nz,ny,nx = map_target.fullMap.shape
zg,yg,xg = mgrid[0:nz,0:ny,0:nx]
indi = list(zip(xg.ravel(), yg.ravel(), zg.ravel()))
#save rigid body details
dict_rf_res = {}
dict_rf_sc = {}
res_list = []
rb_list = []
list_sccc = []
#save scores for each res
dict_res_scores = {}
r_ct = 0
if rigid_body_file != None:
inp = open(rigid_body_file,'r')
for l in inp:
if l[0] != '#':
score_indices = []
lrb = l.split()
if len(lrb) == 0: continue
r_ct += 1
res_list = []
rb_pairs = []
# get scores for each res and each rigid body
                    for i in range(max((len(lrb)//2)-1,1)): #integer division so range() gets an int on Python 3
rb_pairs.append([int(lrb[2*i]),int(lrb[2*i+1])])
# NOTE: wont work for insertion codes
for r in range(int(lrb[2*i]),int(lrb[2*i+1])+1):
score_indices.extend(dict_res_indices[r])
res_list.append(r)
rb_list.append(lrb)
dict_rf_res[r_ct] = rb_pairs
if len(score_indices) == 0:
dict_rf_sc[r_ct] = 0.0#-0.99
for res in res_list: dict_res_scores[res] = 0.0#-0.99
continue
tmplist = score_indices[:]
setlist = set(tmplist)
score_indices = list(setlist)
sc_indices = []
for ii in score_indices: sc_indices.append(indi[ii])
array_indices = nparray(sc_indices)
ind_arrxyz = transpose(array_indices)
# get indices for use with map arrays: ([z...],[y...],x...])
ind_arrzyx = (ind_arrxyz[2],ind_arrxyz[1],ind_arrxyz[0])
sccc = self.calc_moc(ind_arrzyx,sim_map,map_target)
dict_rf_sc[r_ct] = sccc
#save scores
for res in res_list:
dict_res_scores[res] = sccc
list_sccc.append(sccc)
inp.close()
#for residues not in rigid bodies: consider pentapeptides
for res in dict_res_indices:
if not res in dict_res_scores.keys():
indices = dict_res_indices[res][:]
                #consider residues on both sides. NOTE: won't work for insertion codes!
                #need to rewrite res numbers to avoid insertion codes
for ii in range(1,int(round((win+1)/2))):
try:
indices.extend(dict_res_indices[res-ii])
except: pass
for ii in range(1,int(round((win+1)/2))):
try:
indices.extend(dict_res_indices[res+ii])
except: pass
tmplist = indices[:]
setlist = set(tmplist)
indices = list(setlist)
sc_indices = []
for ii in indices: sc_indices.append(indi[ii])
if len(indices) == 0:
dict_res_scores[res] = 0.0#-0.99
continue
array_indices = nparray(sc_indices)
ind_arrxyz = transpose(array_indices)
ind_arrzyx = (ind_arrxyz[2],ind_arrxyz[1],ind_arrxyz[0])
sccc = self.calc_moc(ind_arrzyx,sim_map,map_target)
dict_res_scores[res] = sccc
list_sccc.append(sccc)
return dict_res_scores
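    # Illustrative helper (not part of the original API): the sigma_map/sigma_coeff values quoted
    # in the docstrings above are plain multipliers of the map resolution, so the real-space
    # Gaussian width used for blurring is just coeff * resolution.
    def _sigma_from_resolution(self, resolution, coeff=0.356):
        # e.g. coeff = 0.187 (Situs), 0.356 (Chimera 1/e width), 0.425 (FWHM = resolution, Flex-EM),
        # 0.5 (inflection points a resolution apart), 1.0 (NMFF)
        return coeff * resolution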
    def _get_shell(self,dist1,x,maxlevel,step):
        # boolean mask of grid points between the lower shell bound x and the upper bound
        fshells1 = ((dist1 < min(maxlevel,x+step)) & (dist1 >= x))
        return fshells1
# match power spectra for two maps
def _amplitude_match(self,map_1,map_2,shellmin,shellmax,step=0.005,c1=0,c2=0,reso=None,lpfiltb=False,lpfilta=False,ref=False):
# fourier transform: use pyfftw if available
pyfftw_flag = 1
try:
import pyfftw
except ImportError: pyfftw_flag = 0
try:
if pyfftw_flag == 0: raise ImportError
inputa1 = pyfftw.n_byte_align_empty(map_1.fullMap.shape, 16, 'complex128')
outputa1 = pyfftw.n_byte_align_empty(map_1.fullMap.shape, 16, 'complex128')
# fft planning, set planning_timelimit or flags to make it faster
fft = pyfftw.FFTW(inputa1,outputa1,direction='FFTW_FORWARD',axes=(0,1,2),flags=['FFTW_ESTIMATE'])#planning_timelimit=0.5)
inputa1[:,:,:] = map_1.fullMap[:,:,:]
fft()
ft1 = Map(fftshift(outputa1), map_1.origin, map_1.apix, map_1.filename, map_1.header[:])
except:
# use numpy fft instead
ft1 = map_1.fourier_transform()
try:
if pyfftw_flag == 0: raise ImportError
inputa2 = pyfftw.n_byte_align_empty(map_2.fullMap.shape, 16, 'complex128')
outputa2 = pyfftw.n_byte_align_empty(map_2.fullMap.shape, 16, 'complex128')
fft = pyfftw.FFTW(inputa2,outputa2,direction='FFTW_FORWARD',axes=(0,1,2),flags=['FFTW_ESTIMATE'])#planning_timelimit=0.5)
inputa2[:,:,:] = map_2.fullMap[:,:,:]
fft()
ft2 = Map(fftshift(outputa2), map_2.origin, map_2.apix, map_2.filename, map_2.header[:])
except:
ft2 = map_2.fourier_transform()
#low pass filter before scaling
if reso != None:
cutoff1 = map_1.apix/float(reso)
cutoff2 = map_2.apix/float(reso)
if lpfiltb and not lpfilta:
ft1._tanh_lowpass(cutoff1,fall=0.2,ftmap=True)
ft2._tanh_lowpass(cutoff2,fall=0.2,ftmap=True)
# max dimension
size1 = max(map_1.x_size(),map_1.y_size(),map_1.z_size())
#shell values correspond to freq: 0-0.5 (nyquist)
dist1 = map_1._make_fourier_shell(1)/map_1.apix
size2 = max(map_2.x_size(),map_2.y_size(),map_2.z_size())
#shell values correspond to freq: 0-0.5 (nyquist)
dist2 = map_2._make_fourier_shell(1)/map_2.apix
#SCALING
# storing for plots
ft1_avg = []
ft2_avg = []
ft1_avg_new = []
lfreq = []
# select max spatial frequency to iterate to. low resolution map
maxlevel = 0.5/max(map_1.apix,map_2.apix)
# loop over freq shells, shellwidth=0.005
#for x in arange(0,maxlevel+step,step):
nc = 0
x = 0.0
highlevel = x+step
while (x<maxlevel):
#print x,highlevel, maxlevel
# indices between upper and lower shell bound
fshells1 = ((dist1 < min(maxlevel,highlevel)) & (dist1 >= x))
# radial average
shellvec1 = ft1.fullMap[fshells1]
# indices between upper and lower shell bound
fshells2 = ((dist2 < min(maxlevel,highlevel)) & (dist2 >= x))
# radial average
shellvec2 = ft2.fullMap[fshells2]
            #if len(shellvec1) == 0
to plotting units.
:param field_name: Name of radar field (must be accepted by
`radar_utils.check_field_name`).
:return: new_field_name: Same as input, except in plotting units.
"""
radar_utils.check_field_name(field_name)
if field_name in radar_utils.ECHO_TOP_NAMES:
return field_name.replace('_km', '_kft')
if field_name in SHEAR_VORT_DIV_NAMES:
return field_name.replace('_s01', '_ks01')
return field_name
def field_name_to_verbose(field_name, include_units=True):
"""Converts field name from default format to verbose.
:param field_name: See doc for `radar_utils.field_name_to_verbose`.
:param include_units: Same.
:return: field_name_verbose: Same.
"""
field_name_verbose = radar_utils.field_name_to_verbose(
field_name=field_name, include_units=include_units)
return field_name_verbose.replace('(m ASL)', '(kft ASL)').replace(
'(s', '(ks')
def layer_operations_to_names(
list_of_layer_operation_dicts, include_units=True):
"""Converts list of layer operations to list of field and panel names.
P = number of layer operations = number of panels
:param list_of_layer_operation_dicts: See doc for
`input_examples.reduce_examples_3d_to_2d`.
:param include_units: Boolean flag. If True, panel names will include
units.
:return: field_name_by_panel: length-P list with names of radar fields.
:return: panel_names: length-P list of panel names (to be printed at bottoms
of panels).
"""
error_checking.assert_is_boolean(include_units)
num_panels = len(list_of_layer_operation_dicts)
field_name_by_panel = [''] * num_panels
panel_names = [''] * num_panels
for i in range(num_panels):
this_operation_dict = list_of_layer_operation_dicts[i]
field_name_by_panel[i] = this_operation_dict[
input_examples.RADAR_FIELD_KEY]
this_field_name_verbose = field_name_to_verbose(
field_name=field_name_by_panel[i], include_units=include_units)
this_min_height_km_agl = int(numpy.round(
this_operation_dict[input_examples.MIN_HEIGHT_KEY] * METRES_TO_KM
))
this_max_height_km_agl = int(numpy.round(
this_operation_dict[input_examples.MAX_HEIGHT_KEY] * METRES_TO_KM
))
panel_names[i] = '{0:s}\n{1:s} from {2:d}-{3:d} km AGL'.format(
this_field_name_verbose,
this_operation_dict[input_examples.OPERATION_NAME_KEY].upper(),
this_min_height_km_agl, this_max_height_km_agl
)
return field_name_by_panel, panel_names
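# Usage sketch for layer_operations_to_names (illustrative; the field name and operation strings
# below are assumed example values, not taken from this module).
def _layer_operation_names_example():
    layer_operation_dicts = [{
        input_examples.RADAR_FIELD_KEY: 'reflectivity_dbz',
        input_examples.OPERATION_NAME_KEY: 'max',
        input_examples.MIN_HEIGHT_KEY: 1000.,
        input_examples.MAX_HEIGHT_KEY: 3000.
    }]
    return layer_operations_to_names(
        list_of_layer_operation_dicts=layer_operation_dicts, include_units=True)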
def fields_and_heights_to_names(
field_names, heights_m_agl, include_units=True):
"""Converts list of radar field/height pairs to panel names.
P = number of panels
:param field_names: length-P list with names of radar fields. Each must be
accepted by `radar_utils.check_field_name`.
:param heights_m_agl: length-P numpy array of heights (metres above ground
level).
:param include_units: Boolean flag. If True, panel names will include
units.
:return: panel_names: length-P list of panel names (to be printed at bottoms
of panels).
"""
error_checking.assert_is_boolean(include_units)
error_checking.assert_is_string_list(field_names)
error_checking.assert_is_numpy_array(
numpy.array(field_names), num_dimensions=1)
num_panels = len(field_names)
error_checking.assert_is_numpy_array(
heights_m_agl, exact_dimensions=numpy.array([num_panels])
)
error_checking.assert_is_geq_numpy_array(heights_m_agl, 0.)
heights_m_agl = numpy.round(heights_m_agl).astype(int)
panel_names = [''] * num_panels
for i in range(num_panels):
this_field_name_verbose = field_name_to_verbose(
field_name=field_names[i], include_units=include_units)
panel_names[i] = '{0:s}\nat {1:d} km AGL'.format(
this_field_name_verbose,
int(numpy.round(heights_m_agl[i] * METRES_TO_KM))
)
return panel_names
def get_default_colour_scheme(field_name, opacity=DEFAULT_OPACITY):
"""Returns default colour scheme for radar field.
:param field_name: Field name (must be accepted by
`radar_utils.check_field_name`).
:param opacity: Opacity (in range 0...1).
:return: colour_map_object: Instance of `matplotlib.colors.ListedColormap`.
:return: colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`.
"""
radar_utils.check_field_name(field_name)
error_checking.assert_is_greater(opacity, 0.)
error_checking.assert_is_leq(opacity, 1.)
colour_map_object = None
colour_norm_object = None
if field_name in radar_utils.REFLECTIVITY_NAMES:
colour_map_object, colour_norm_object = (
_get_reflectivity_colour_scheme()
)
elif field_name in radar_utils.SHEAR_NAMES:
colour_map_object, colour_norm_object = _get_az_shear_colour_scheme()
elif field_name in radar_utils.ECHO_TOP_NAMES:
colour_map_object, colour_norm_object = _get_echo_top_colour_scheme()
elif field_name == radar_utils.MESH_NAME:
colour_map_object, colour_norm_object = _get_mesh_colour_scheme()
elif field_name == radar_utils.SHI_NAME:
colour_map_object, colour_norm_object = _get_shi_colour_scheme()
elif field_name == radar_utils.VIL_NAME:
colour_map_object, colour_norm_object = _get_vil_colour_scheme()
elif field_name == radar_utils.DIFFERENTIAL_REFL_NAME:
colour_map_object, colour_norm_object = _get_zdr_colour_scheme()
elif field_name == radar_utils.SPEC_DIFF_PHASE_NAME:
colour_map_object, colour_norm_object = _get_kdp_colour_scheme()
elif field_name == radar_utils.CORRELATION_COEFF_NAME:
colour_map_object, colour_norm_object = _get_rho_hv_colour_scheme()
elif field_name == radar_utils.SPECTRUM_WIDTH_NAME:
colour_map_object, colour_norm_object = (
_get_spectrum_width_colour_scheme()
)
elif field_name == radar_utils.VORTICITY_NAME:
colour_map_object, colour_norm_object = _get_vorticity_colour_scheme()
elif field_name == radar_utils.DIVERGENCE_NAME:
colour_map_object, colour_norm_object = _get_divergence_colour_scheme()
num_colours = len(colour_map_object.colors)
for i in range(num_colours):
colour_map_object.colors[i] = matplotlib.colors.to_rgba(
colour_map_object.colors[i], opacity
)
return colour_map_object, colour_norm_object
def plot_latlng_grid(
field_matrix, field_name, axes_object, min_grid_point_latitude_deg,
min_grid_point_longitude_deg, latitude_spacing_deg,
longitude_spacing_deg, colour_map_object=None, colour_norm_object=None,
refl_opacity=DEFAULT_OPACITY):
"""Plots lat-long grid as colour map.
M = number of rows (unique grid-point latitudes)
N = number of columns (unique grid-point longitudes)
Because this method plots a lat-long grid (rather than an x-y grid), if you
have used Basemap to plot borders or anything else, the only acceptable
projection is cylindrical equidistant (in which x = longitude and
y = latitude, so no coordinate conversion is necessary).
To use the default colour scheme for the given radar field, leave
`colour_map_object` and `colour_norm_object` empty.
:param field_matrix: M-by-N numpy array with values of radar field.
:param field_name: Name of radar field (must be accepted by
`radar_utils.check_field_name`).
:param axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`.
:param min_grid_point_latitude_deg: Minimum latitude (deg N) over all grid
points. This should be the latitude in the first row of `field_matrix`
-- i.e., at `field_matrix[0, :]`.
:param min_grid_point_longitude_deg: Minimum longitude (deg E) over all grid
points. This should be the longitude in the first column of
`field_matrix` -- i.e., at `field_matrix[:, 0]`.
:param latitude_spacing_deg: Spacing (deg N) between grid points in adjacent
rows.
:param longitude_spacing_deg: Spacing (deg E) between grid points in
adjacent columns.
:param colour_map_object: Instance of `matplotlib.pyplot.cm`. If this is
None, the default colour scheme for `field_name` will be used.
:param colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`. If
this is None, the default colour scheme for `field_name` will be used.
:param refl_opacity: Opacity for reflectivity colour scheme. Used only if
`colour_map_object is None and colour_norm_object is None`.
"""
field_matrix = _field_to_plotting_units(
field_matrix=field_matrix, field_name=field_name)
(field_matrix_at_edges, grid_cell_edge_latitudes_deg,
grid_cell_edge_longitudes_deg
) = grids.latlng_field_grid_points_to_edges(
field_matrix=field_matrix, min_latitude_deg=min_grid_point_latitude_deg,
min_longitude_deg=min_grid_point_longitude_deg,
lat_spacing_deg=latitude_spacing_deg,
lng_spacing_deg=longitude_spacing_deg)
field_matrix_at_edges = numpy.ma.masked_where(
numpy.isnan(field_matrix_at_edges), field_matrix_at_edges)
use_default_colour_scheme = (
colour_map_object is None or colour_norm_object is None
)
if use_default_colour_scheme:
opacity = (
refl_opacity if field_name in radar_utils.REFLECTIVITY_NAMES
else DEFAULT_OPACITY
)
colour_map_object, colour_norm_object = get_default_colour_scheme(
field_name=field_name, opacity=opacity)
else:
if hasattr(colour_norm_object, 'boundaries'):
colour_norm_object.boundaries = _field_to_plotting_units(
field_matrix=colour_norm_object.boundaries,
field_name=field_name)
else:
colour_norm_object.vmin = _field_to_plotting_units(
field_matrix=colour_norm_object.vmin, field_name=field_name)
colour_norm_object.vmax = _field_to_plotting_units(
field_matrix=colour_norm_object.vmax, field_name=field_name)
if hasattr(colour_norm_object, 'boundaries'):
min_colour_value = colour_norm_object.boundaries[0]
max_colour_value = colour_norm_object.boundaries[-1]
else:
min_colour_value = colour_norm_object.vmin
max_colour_value = colour_norm_object.vmax
pyplot.pcolormesh(
grid_cell_edge_longitudes_deg, grid_cell_edge_latitudes_deg,
field_matrix_at_edges, cmap=colour_map_object, norm=colour_norm_object,
vmin=min_colour_value, vmax=max_colour_value, shading='flat',
edgecolors='None', axes=axes_object, zorder=-1e11)
def plot_2d_grid_without_coords(
field_matrix, field_name, axes_object, plot_grid_lines=True,
font_size=DEFAULT_FONT_SIZE, annotation_string=None,
colour_map_object=None, colour_norm_object=None,
refl_opacity=DEFAULT_OPACITY):
"""Plots 2-D grid as colour map.
In this case the grid is not georeferenced (convenient for storm-centered
radar images).
To use the default colour scheme for the given radar field, leave
`colour_map_object` and `colour_norm_object` empty.
:param field_matrix: See doc for `plot_latlng_grid`.
:param field_name: Same.
:param axes_object: Same.
:param plot_grid_lines: Boolean flag. If True, will plot grid lines on
radar image.
:param font_size: Font size for annotation.
:param annotation_string: Annotation (will be printed in the bottom-center).
If you want no annotation, leave this alone.
:param colour_map_object: See doc for `plot_latlng_grid`.
:param colour_norm_object: Same.
:param refl_opacity: Same.
:return: colour_map_object: Same as input, except default might have been
set.
:return: colour_norm_object: Same as input, except default might have been
set.
"""
error_checking.assert_is_numpy_array_without_nan(field_matrix)
error_checking.assert_is_numpy_array(field_matrix, num_dimensions=2)
error_checking.assert_is_boolean(plot_grid_lines)
field_matrix = _field_to_plotting_units(
field_matrix=field_matrix, field_name=field_name)
field_matrix = numpy.ma.masked_where(
numpy.isnan(field_matrix), field_matrix
)
use_default_colour_scheme = (
colour_map_object is None or colour_norm_object is None
)
if use_default_colour_scheme:
opacity = (
refl_opacity if field_name in radar_utils.REFLECTIVITY_NAMES
else DEFAULT_OPACITY
)
colour_map_object, colour_norm_object = get_default_colour_scheme(
field_name=field_name, opacity=opacity)
else:
if hasattr(colour_norm_object, 'boundaries'):
colour_norm_object.boundaries = _field_to_plotting_units(
field_matrix=colour_norm_object.boundaries,
field_name=field_name)
else:
colour_norm_object.vmin = _field_to_plotting_units(
field_matrix=colour_norm_object.vmin, field_name=field_name)
colour_norm_object.vmax = _field_to_plotting_units(
field_matrix=colour_norm_object.vmax, field_name=field_name)
if hasattr(colour_norm_object, 'boundaries'):
min_colour_value = colour_norm_object.boundaries[0]
max_colour_value = colour_norm_object.boundaries[-1]
else:
min_colour_value = colour_norm_object.vmin
max_colour_value = colour_norm_object.vmax
axes_object.pcolormesh(
field_matrix, cmap=colour_map_object, norm=colour_norm_object,
vmin=min_colour_value, vmax=max_colour_value, shading='flat',
edgecolors='None', zorder=-1e11)
if plot_grid_lines:
x_coord_limits = axes_object.get_xlim()
x_grid_coords = numpy.linspace(
x_coord_limits[0], x_coord_limits[1], num=5, dtype=float
)[1:-1]
y_coord_limits = axes_object.get_ylim()
y_grid_coords = numpy.linspace(
y_coord_limits[0], y_coord_limits[1], num=5, dtype=float
)[1:-1]
axes_object.set_xticks(x_grid_coords)
axes_object.set_yticks(y_grid_coords)
axes_object.grid(
b=True, which='major', axis='both', linestyle='--', linewidth=2)
axes_object.xaxis.set_ticklabels([])
axes_object.yaxis.set_ticklabels([])
axes_object.xaxis.set_ticks_position('none')
axes_object.yaxis.set_ticks_position('none')
if annotation_string is not None:
error_checking.assert_is_string(annotation_string)
axes_object.text(
0.5, 0.01, annotation_string, fontsize=font_size, color='k',
bbox=TEXT_BOUNDING_BOX_DICT, horizontalalignment='center',
verticalalignment='bottom', transform=axes_object.transAxes,
zorder=1e10)
return colour_map_object, colour_norm_object
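# Usage sketch for plot_2d_grid_without_coords (illustrative only; 'reflectivity_dbz' is an assumed
# field name and the data are random, just to show the call pattern for a storm-centred panel).
def _plot_2d_grid_example():
    import numpy
    from matplotlib import pyplot
    field_matrix = numpy.random.uniform(low=0., high=60., size=(32, 32))
    _, axes_object = pyplot.subplots(1, 1, figsize=(6, 6))
    return plot_2d_grid_without_coords(
        field_matrix=field_matrix, field_name='reflectivity_dbz',
        axes_object=axes_object, annotation_string='example panel')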
def plot_many_2d_grids(
data_matrix, field_names, axes_objects, panel_names=None,
plot_grid_lines=True, colour_map_objects=None, colour_norm_objects=None,
refl_opacity=DEFAULT_OPACITY, plot_colour_bar_flags=None,
panel_name_font_size=DEFAULT_FONT_SIZE,
colour_bar_font_size=DEFAULT_FONT_SIZE,
colour_bar_length=DEFAULT_COLOUR_BAR_LENGTH):
"""Plots many 2-D grids in paneled figure.
M = number of rows in grid
N = number of columns in grid
C = number of fields
:param data_matrix: M-by-N-by-C numpy array of radar values.
:param field_names: length-C list of field names.
:param axes_objects: length-C list of axes handles (instances of
`matplotlib.axes._subplots.AxesSubplot`).
:param panel_names: length-C list of panel names (to be printed at bottom of
each panel). If None, panel names will not be printed.
:param plot_grid_lines: Boolean flag. If True, will plot grid lines over
radar images.
:param colour_map_objects: length-C list of colour schemes (instances of
`matplotlib.pyplot.cm` or similar). If None, will use default colour
scheme for each field.
:param colour_norm_objects: length-C list of colour-normalizers (instances
of `matplotlib.colors.BoundaryNorm` or similar). If None, will use
default normalizer for each field.
:param refl_opacity: Opacity for reflectivity colour scheme. Used only if
`colour_map_objects is None and colour_norm_objects is None`.
:param plot_colour_bar_flags: length-C numpy array of Boolean flags. If
`plot_colour_bar_flags[k] == True`, will plot colour bar for [k]th
        panel. If None, will plot
None
return find(self)
def gen_description_doc(self, out):
ds = self.find_aspects('description')
if not ds:
out.gen_text('<NO DESCRIPTION OF %r>' % self.tgtfullname)
else:
for d in ds:
d.gen_doc(out)
def get_id_name(self):
return self.tgtfullname
def get_link_name(self):
return self.tgtfullname
def get_local_name(self):
return self.srclastname
def get_test_name(self):
return self.tgtfullname
def get_name(self):
return self.tgtfullname
def get_Name(self):
# To be used in Name of doc.
n = self.find_aspects('name')
if not n:
name = self.tgtlastname
else:
name = n.tgt.node.arg.strip()
return name
def get_descr_by_subject(self, subject):
return self.pac.get_descr_by_subject(subject)
def init_localview(self, only_vars=0):
self.localview = {}
self.aspects = []
self.aspects_by_tag = {}
if not only_vars:
self.aspects_extend_by_subjects(self.tgt.aspects)
def resolve_aspects(self):
self.init_localview()
if self.src.args:
self.args = [self.env.get_descr_by_subject(
arg) for arg in self.src.args]
self.resolve_special()
def resolve_special(self):
# To be overridden with special checks etc.
pass
def get_the_one_argument(self):
arg = self.src.node.arg.strip()
if self.aspects:
            self.error('No children expected for %r' % self.node.tag,
                       self.src.node)
return arg
def make_and_test_kind(self, kinds):
ks = []
def flatten(k):
if k.d_tag == 'kind':
for k1 in k.find_kind_aspects():
flatten(k1)
else:
ks.append(k)
if (len(kinds) == 1 and kinds[0].d_tag == 'kind'):
return kinds[0]
for k in kinds:
flatten(k)
kinds = ks
k = Kind()
k.d_tag = 'kind'
k.aspects = kinds
k.tgtfullname = '(%s)' % ('&'.join([x.tgtfullname for x in kinds]))
k.is_lookuped = 1
return k
def make_and_kind(self, kinds):
if (len(kinds) == 1 and kinds[0].d_tag in('kind', 'kind_of')):
return kinds[0]
k = Kind()
k.d_tag = 'kind'
k.aspects = kinds
k.tgtfullname = '(%s)' % ('&'.join([x.tgtfullname for x in kinds]))
k.is_lookuped = True
k.is_synthetic = True
return k
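    # Note on the synthesized names above: and-combining kinds whose tgt names
    # are e.g. 'A' and 'B' yields a synthetic Kind with tgtfullname '(A&B)';
    # make_or_kind below joins with '|' instead, giving '(A|B)'.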
def make_or_kind(self, kinds):
if len(kinds) == 1:
return kinds[0]
else:
k = Superkind()
k.d_tag = 'kind'
k.aspects = kinds
k.tgtfullname = '(%s)' % ('|'.join([x.tgtfullname for x in kinds]))
k.is_lookuped = True
k.is_synthetic = True
return k
class Definition(Description):
d_is_def = 1
d_type = 'definition'
def export_aspects(self, src):
src.__class__ = self.__class__
if src.d_tag == 'import':
src.d_tag = self.d_tag
else:
if src.d_tag != self.d_tag:
# Can't think of how this would happen -
# so not yet converted to .error()
raise ImportError('Different description tag')
src.aspects_extend(self.aspects)
class DescriptionDescription(Description):
d_sub = ('text', )
d_tag = 'description'
def gen_doc(self, out):
self.srcnode.arg_accept(out)
class Default(DescriptionDescription):
def gen_doc(self, out):
arglines = self.srcnode.arg.strip().split('\n')
default = arglines[0]
rest = '\n'.join(arglines[1:])
out.open('dl')
out.open('dt')
out.open('strong')
out.gen_text('Default: ')
out.close()
out.gen_text(default)
out.close()
out.open('dd')
out.gen_text(rest)
self.srcnode.children_accept(out)
out.close()
out.close('dl')
class DescriptionWithHeader(DescriptionDescription):
def gen_doc(self, out):
arglines = self.srcnode.arg.strip().split('\n')
header = arglines[0]
rest = '\n'.join(arglines[1:])
out.open('dl')
out.gen_outer_dt(header)
out.open('dd')
out.gen_text(rest)
self.srcnode.children_accept(out)
out.close()
out.close()
class Comment(DescriptionDescription):
d_tag = 'comment'
pass
class Either(Description):
d_type = 'with_args'
def get_atom_beams(self):
return [beam(self)]
def get_atom_kinds(self):
return [self]
def get_alt_kinds(self):
return self.find_kind_aspects()
class Import(Definition):
d_sub = ('from', 'resolve_by', 'using',
'attribute', 'condition', 'description', 'comment', 'constructor',
'mapping', 'method',
'operator', 'inplace_operator', 'reverse_operator', 'function_operator',
'delitem', 'getitem', 'setitem',
'self',
'subkind_of',
)
def resolve_tgt(self):
self.is_lookuped = 1
using_name, using_node = self.src.imp_using_map.get(
self.src.definame, (self.src.definame, self.src.node))
import_node = self.src.node
ds = [self.pac.import_package(from_name, from_node).
get_descr_by_name(using_name, using_node)
for (from_name, from_node) in self.src.imp_froms]
if len(ds) == 1:
d = ds[0]
else:
d = Product(self, ds, ProductSubject([x.src for x in ds]),
self.src.imp_resolve_mode)
self.tgt = d.tgt
self.tgtfullname = self.mod.tgt_prefix+self.tgt.fullname
self.the_less_specific_descr = d
self.init_localview(only_vars=1)
d.export_aspects(self)
self.aspects_extend_by_subjects(self.src.aspects)
self.deftgt()
def resolve_aspects(self):
pass
class Product(Description):
def __init__(self, env, ds, src, mode):
self.env = env
self.mod = env.mod
self.src = src
self.mode = mode
self.pac = env.pac
tgt = ds[0].tgt
for d in ds[1:]:
if d.tgt is not tgt:
self.error('Import error when importing from multiple packages:\n' +
' Can not make a product of %r (tgt = %r) with %r (tgt = %r)\n' % (
d.src.fullname, d.tgt.fullname, ds[0].src.fullname, ds[0].tgt.fullname) +
' because of different targets.',
d.src.node)
self.tgt = tgt
self.ds = ds
def export_aspects(self, src):
for d in self.ds:
d.export_aspects(src)
def is_more_specific_than(self, d):
for x in self.ds:
if x is d or x.is_more_specific_than(d):
return True
return False
class PackageDescription(UntypedDescription):
def __init__(self, env, tgt, src):
self.env = env
self.pac = self
self.mod = env.mod
self.tgt = tgt
self.src = src
class ErrorDescription:
d_tag = 'error'
def __init__(self, env):
self.env = env
def get_id_name(self):
return '<error>.<error>'
class Package(Description):
d_sub = ('and', 'comment', 'condition', 'document', 'import', 'kind', 'macro',
'superkind',
)
def get_tgtdicts(self):
seen = {id(self.tgtview): 1}
tgtdicts = [self.tgtview]
for p in list(self.imported_packages.values()):
sds = p.get_tgtdicts()
for sd in sds:
if id(sd) not in seen:
seen[id(sd)] = 1
tgtdicts.append(sd)
return tgtdicts
def get_descr_by_name(self, name, context=None):
if name.startswith(self.mod.tgt_prefix):
return self.get_descr_by_tgt_name(name, context)
e = self
parts = name.split('.')
for part in parts:
try:
e = e.localview[part]
except KeyError:
assert context
self.env.error(
'Undefined: %r in %r.' % (part, e.get_id_name()), context,
exception=UndefinedError)
e.resolve_lookuped()
return e
def get_descr_by_subject(self, subject):
name = subject.fullname
if name.startswith(self.srcfullname+'.'):
name = name[len(self.srcfullname)+1:].strip()
else:
self.error('Undefined: %r' % name, subject.node)
return self.get_descr_by_name(name, subject.node)
def get_descr_by_tgt_name(self, name, context=None):
tgtdicts = self.get_tgtdicts()
descrs = []
for tgtdict in tgtdicts:
if name in tgtdict:
d = tgtdict[name]
d.resolve_lookuped()
d = d.get_descr_for_aspect('*')
descrs.append(d)
if not descrs:
self.error('No definition of tgt %r' %
name, context, UndefinedError)
descrs = self.get_most_specific_descrs(descrs)
if len(descrs) > 1:
descrs = self.merge_policy(descrs)
if len(descrs) > 1:
self.error('Conflicting descriptions of %r:%r' % (
name, [d.src.fullname for d in descrs]),
context,
DuplicateError)
return descrs[0]
def get_filename(self):
return self.src.filename
def get_package(self):
return self
def resolve_tgt(self):
self.tgtview = {}
def resolve_aspects(self):
self.imported_packages = {}
self.init_localview()
def import_package(self, name, context):
pac = self.imported_packages.get(name)
if pac is None:
pac = self.env.import_package(name, context)
self.imported_packages[name] = pac
return pac
def get_documents(self):
documents = []
for doc in self.src.documents:
node = doc.node
doc = self.mod.Document.document(node, self)
documents.append(doc)
return documents
class Attribute(Definition):
d_sub = ('attribute', 'comment', 'description', 'description_with_header',
'either', 'kind_of', 'mapping', 'method', 'self')
def export_aspects(self, src):
src.__class__ = self.__class__
src.aspects_extend(self.aspects)
def get_attr_name(self):
return self.tgtlastname
def get_name(self):
return self.tgtlastname
def get_kind(self):
kas = self.find_kind_aspects()
return self.make_and_kind(kas)
def get_kind_name(self):
k = self.get_kind()
if k.d_tag == 'kind_of':
kas = k.find_kind_aspects()
if len(kas) == 1:
k = kas[0]
else:
raise ValueError("Don't know how to name this kind, %r" % self)
return k.tgtfullname
def get_link_name(self):
# xxx needs smoother logic
s = '%s.%s' % (self.get_descr_by_subject(
self.tgt.parent).get_link_name(), self.tgt.lastname)
return s
def get_test_kind(self):
kas = self.find_kind_aspects()
return self.make_and_test_kind(kas)
def is_method(self):
return (self.find_aspects('mapping') and
not self.find_aspects('kind_of'))
def get_op_name(self):
return self.get_attr_name()
class KindOf(Description):
d_type = 'with_args'
d_sub = ()
class SubkindOf(Description):
d_type = 'with_args'
d_sub = ('description',)
class Kind(Definition):
d_sub = ('attribute', 'condition', 'description', 'comment', 'constructor',
'example',
'mapping', 'method',
'operator', 'inplace_operator', 'reverse_operator', 'function_operator',
'self',
'subkind_of',
'delitem', 'getitem', 'setitem',
)
def get_attributes(self):
return self.find_aspects('attribute')
def get_mappings(self):
return self.find_aspects('mapping')
class Superkind(Definition):
d_sub = ('comment', 'description', 'example', 'superkind_of')
def get_local_name(self):
return self.srclastname
class SuperkindOf(Description):
d_type = 'with_args'
def get_examples(self, enough=1):
examples = Description.get_examples(self, enough)
if len(examples) < enough:
for ka in self.find_kind_aspects():
if ka is self:
continue
examples.extend(ka.get_examples(enough-len(examples)))
if len(examples) >= enough:
break
return examples
class Example(Description):
d_sub = ('comment', 'description', 'in_context')
partab = {"'''": "'''",
'"""': '"""',
'(': ')',
'[': ']',
'{': '}'
}
def get_ex_text(self):
return self.src.ex_text
def get_examples(self, get_all=False):
return [self]
def get_ctx_text(self):
asp = self.find_aspects('in_context')
if not asp:
return ''
# It is of length 1, has been checked.
return asp[0].tgt.node.arg.strip()
def get_use_text(self, x):
return x
class InContext(Description):
d_max_occur = 1
class Defines(Description):
d_type = 'with_args'
def get_defined_tgt_names(self):
return [x.tgtfullname for x in self.find_aspects('arg')]
class Macro(Definition):
def export_aspects(self, src):
src.__class__ = self.__class__
src.tgtnode = self.tgtnode
def use(self, options):
return self.mod.SpecNodes.node_of_taci(
'block', '', self.tgtnode.children, self.tgtnode.index)
class Self(Description):
d_max_occur = 1
class Mapping(Description):
d_type = 'other'
d_sub = ('alt', 'arg', 'args', 'comment', 'description', 'description_with_header',
'equation',
'draw',
'key_arg',
'optionals',
'precondition', 'postcondition',
'repeat', 'returns',
'self',
'seq',
)
def chk_num_args(self, min, max):
re = self.get_args_re({})
xs = re.sequni()
for x in xs:
try:
if min is not None and min == max and len(x) != min:
self.error(
'%s requires %d argument%s specified, got %d.' % (
self.d_tag, min, 's'[min == 1:], len(x)),
self.src.node)
elif min is not None and len(x) < min:
self.error(
'%s requires at least %d argument%s specified, got %d.' % (
self.d_tag, min, 's'[min == 1:], len(x)),
self.src.node)
                elif max is not None and len(x) > max:
self.error(
'%s can take at most %d argument%s specified, got %d.' % (
self.d_tag, max, 's'[max == 1:], len(x)),
self.src.node)
except ReportedError:
pass
def get_arg_kinds(self):
ak = []
for a in self.find_aspects('args'):
ak.extend(list(a.args))
return ak
def get_args_examples(self, mapname, top_kind):
# Get arguments example, esp. for test purposes
try:
opt = {'get_examples': True}
re = self.get_args_re(opt)
import math
import time
import requests
import pandas as pd
import dask.delayed
from time import sleep
from dask import compute
from bs4 import BeautifulSoup
from datetime import date, datetime
def get_page(url):
"""
    returns a soup object containing the given webpage, along with a timestamp string of when it was requested
"""
tos = str(datetime.now())
result = requests.get(url)
content = result.content
page = BeautifulSoup(content, features='html')
return page, tos
def get_room_classes(soup_page):
"""
returns all the listings that can be found on the page (soup object) in a list
"""
rooms = soup_page.findAll('div', {'class':'_8ssblpx'}) # _8ssblpx _uhpzdny _gig1e7 _1wcpzyga
result = []
for room in rooms:
result.append(room)
return result
def get_listing_link(listing):
"""
returns the URL link of given listing
"""
listing_link = 'http://airbnb.com' + listing.find('a')['href']
listing_link = listing_link.split('?')[0]
return listing_link
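# Hypothetical example of the transformation above: a listing anchor with
# href="/rooms/12345?adults=2&source=search" would be returned as
# "http://airbnb.com/rooms/12345" once the query string is dropped.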
def get_listing_title(listing):
"""
returns the title of given listing
"""
title = listing.find('meta')['content']
title = title.split(' - null - ')
return title[0]
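# Hypothetical example: a meta content of "Charming loft - null - Lisbon" is
# split on ' - null - ' and only "Charming loft" is returned.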
def get_top_row(listing):
"""
returns the top row of given listing's info
"""
top_row = listing.find('div', {'class':'_1tanv1h'}).text # _167gordg
top_row = top_row.split(' in ')
# what are we looking at?
what_it_is = top_row[0]
# where is it?
where_it_is = top_row[1]
return what_it_is, where_it_is
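# Hypothetical example: a top row reading "Entire apartment in Lisbon" splits
# on ' in ' into ('Entire apartment', 'Lisbon'). If the text contains ' in '
# more than once, only the segment between the first two occurrences is kept
# as the location.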
def get_room_info(listing):
"""
returns room info of listing
"""
    room_info = listing.find('div', {'class': '_kqh46o'}).text
split_info = [i.split() for i in room_info.split(' · ')]
room_dict = {}
for i in split_info:
if i not in [['Studio'], ['Half-bath']]:
if len(i) == 2:
room_dict[i[1]] = i[0]
# shared-baths
elif len(i) == 3:
i = [i[0], '-'.join([i[1], i[2]])]
room_dict[i[1]] = i[0]
else:
if i[1] == 'total':
room_dict['bedrooms'] = [i[0]]
else:
print(f'unexpected room_info | unexpected split_info len(i)=={len(i)}!=2!=3\n{i}')
room_dict[' '.join(i)] = i[0]
else:
# Half-baths and Studios
if i[0] == 'Studio':
room_dict['is_studio'] = True
room_dict[i[0]] = 0
# need better solution for bedrooms
weird_bedrooms = 0
try:
b = room_dict['bedrooms']
del b
except:
try:
room_dict['bedrooms'] = room_dict['bedroom']
except:
try:
room_dict['bedrooms'] = room_dict['Studio']
except:
weird_bedrooms += 1
print(f'weird bedrooms {weird_bedrooms}')
room_dict['bedrooms'] = room_dict.get('bedrooms')
try:
room_dict['baths']
except:
try:
room_dict['baths'] = room_dict['bath']
except:
room_dict['baths'] = None
room_dict['half_baths'] = room_dict.get('Half-bath')
room_dict['shared_baths'] = room_dict.get('shared-baths')
room_dict['is_studio'] = room_dict.get('is_studio', False)
room_dict['beds'] = room_dict.get('beds')
    room_dict['guests'] = room_dict.get('guests')
# check for bedrooms list
if type(room_dict['bedrooms']) == list:
if len(room_dict['bedrooms']) == 1:
room_dict['bedrooms'] = float(room_dict['bedrooms'][0])
else:
raise Exception(f'unexpected bedrooms list | {room_dict["bedrooms"]}')
room_dict = {key:value for key,value in room_dict.items() if key in ['guests', 'bedrooms', 'beds', 'is_studio', 'baths', 'half_baths', 'shared_baths']}
return room_dict
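# Rough sketch of the parsing above, using hypothetical listing text: an info
# row of "2 guests · 1 bedroom · 2 beds · 1 bath" ends up as
# {'guests': '2', 'bedrooms': '1', 'beds': '2', 'is_studio': False,
#  'baths': '1', 'half_baths': None, 'shared_baths': None}.
# Counts stay as strings unless bedrooms arrives as a list, in which case it
# is cast to float.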
def get_room_price(listing):
"""
returns the nightly rate (price) of given listing
"""
price_text = listing.find('div', {'class':'_ls0e43'}).text
price = price_text.split('$')
price = price[1]
# extract float value
    price = price.split(" ")[0] # take the value before the first space
# remove possible / at end of string
if '/' in price:
price = price[:len(price) - 1]
    # remove the thousands separator for prices of $1,000 or more
if ',' in price:
price = ''.join(price.split(','))
return float(price)
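# Hypothetical example: a price block whose text contains "$1,299 / night"
# yields "1,299" after splitting on '$' and taking the token before the first
# space; the comma is then stripped and 1299.0 is returned.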
def get_room_rating_and_reviews(listing):
"""
returns star rating and number of reviews of given listing
"""
try:
output = listing.find('span', {'class':'_18khxk1'}).text
output = output.split('\xa0')
avg_rating = float(output[0])
n_reviews = float(output[1][:-1].split('(')[1])
return avg_rating, n_reviews
except:
try:
return listing.find('span', {'class':'_18khxk1'}), listing.find('span', {'class':'_18khxk1'})
except:
raise Exception(f'get_room_rating_and_reviews | listing == {type(listing), len(listing)}')
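# Hypothetical example of the span format this expects: text of "4.87\xa0(123)"
# is split on the non-breaking space, giving an average rating of 4.87 and a
# review count of 123.0. Listings without that span fall back to returning the
# raw find() result twice (typically None, None).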
class airbnb_scrape():
def __init__(self, location, location_alias):
"""
set location, base (url) link, and blank record books
"""
self.base_link = f'http://www.airbnb.com/s/{location}/homes'
self.location = location
self.location_alias = location_alias
self.n_pages = None
self.n_results = None
self.page_urls = []
self.data_dir = 'data/'
# set known basic amenities
self.possible = ['Gym', 'Wifi', 'Self check-in', 'Air conditioning', 'Pets allowed', 'Indoor fireplace', 'Hot tub', 'Free parking', 'Pool', 'Kitchen', 'Breakfast', 'Elevator', 'Washer', 'Dryer',
'Heating', 'Waterfront', 'Dishwasher', 'Beachfront', 'Ski-in/Ski-out', 'Terrace', 'Sonos sound system', 'BBQ grill', 'Hair dryer', "Chef's kitchen", 'Wet bar', 'Sun loungers',
'Home theater', 'Housekeeping', 'Gated property', 'Gas fireplace', 'Plunge pool', 'Infinity pool', 'Sun deck', 'Game room', 'Surround sound system', 'Resort access']
# set current schema column names
self.names = ['ds', 'search_filter', 'url', 'title', 'type', 'location', 'guests', 'bedrooms', 'beds', 'is_studio', 'baths', 'half_baths', 'shared_baths', 'price', 'avg_rating', 'n_reviews', 'gym_bool',
'wifi_bool', 'self_check_in_bool', 'air_conditioning_bool', 'pets_allowed_bool', 'indoor_fireplace_bool', 'hot_tub_bool', 'free_parking_bool', 'pool_bool', 'kitchen_bool', 'breakfast_bool',
'elevator_bool', 'washer_bool', 'dryer_bool', 'heating_bool', 'waterfront_bool', 'dishwasher_bool', 'beachfront_bool', 'ski_in_ski_out_bool', 'terrace_bool', 'sonos_sound_system_bool',
'bbq_grill_bool', 'hair_dryer_bool', 'chefs_kitchen_bool', 'wet_bar_bool', 'sun_loungers_bool', 'home_theater_bool', 'housekeeping_bool', 'gated_property_bool', 'gas_fireplace_bool',
'plunge_pool_bool', 'infinity_pool_bool', 'sun_deck_bool', 'game_room_bool', 'surround_sound_system_bool', 'resort_access_bool']
self.dtypes = {'ds': 'object', 'search_filter': 'object', 'url': 'object', 'title': 'object', 'type': 'object', 'location': 'object', 'guests': 'float64', 'bedrooms': 'float64', 'beds': 'float64',
'is_studio': 'bool', 'baths': 'float64', 'half_baths': 'float64', 'shared_baths': 'float64', 'price': 'float64', 'avg_rating': 'float64', 'n_reviews': 'float64', 'gym_bool': 'bool',
'wifi_bool': 'bool', 'self_check_in_bool': 'bool', 'air_conditioning_bool': 'bool', 'pets_allowed_bool': 'bool', 'indoor_fireplace_bool': 'bool', 'hot_tub_bool': 'bool', 'free_parking_bool':
'bool', 'pool_bool': 'bool', 'kitchen_bool': 'bool', 'breakfast_bool': 'bool', 'elevator_bool': 'bool', 'washer_bool': 'bool', 'dryer_bool': 'bool', 'heating_bool': 'bool',
'waterfront_bool': 'bool', 'dishwasher_bool': 'bool', 'beachfront_bool': 'bool', 'ski_in_ski_out_bool': 'bool', 'terrace_bool': 'bool', 'sonos_sound_system_bool': 'bool',
'bbq_grill_bool': 'bool', 'hair_dryer_bool': 'bool', 'chefs_kitchen_bool': 'bool', 'wet_bar_bool': 'bool', 'sun_loungers_bool': 'bool', 'home_theater_bool': 'bool', 'housekeeping_bool': 'bool',
'gated_property_bool': 'bool', 'gas_fireplace_bool': 'bool', 'plunge_pool_bool': 'bool', 'infinity_pool_bool': 'bool', 'sun_deck_bool': 'bool', 'game_room_bool': 'bool',
'surround_sound_system_bool': 'bool', 'resort_access_bool': 'bool'}
def get_basic_facilities(self, listing):
'''
returns a dictionary of the given listing's basic facilities with True / None values based on known possible basic facilites
'''
# make list of this listing's basic facilites
try:
basic_facilities = listing.findAll("div", {"class":"_kqh46o"})[1].text
basic_facilities = basic_facilities.split(' · ')
except:
basic_facilities = []
# open a record for this listing
room_dict = {}
# add each basic facility to this room's record
for f in basic_facilities:
if f in self.possible:
room_dict[f] = True
else:
                # looks like we have a new basic facility
i = input(f'unexpected basic_facilites | {f} | is new? (y/n) ')
if i == 'y':
i = input(f'ok, new basic facility\nwhat should the column name be?\ne.g. Hot tub is hot_tub_bool\n"exit" to quit\n column name == ')
if i != 'exit':
# set new amenity
room_dict[f] = True
# update possible amenities and column names
self.possible.append(f)
self.names.append(i)
print(f'\nnew self.possible ==\n{self.possible}\n\nnew self.names ==\n{self.names}\n\nplease update now (sleeping 60 seconds)\n')
sleep(60)
else:
raise Exception(f"not sure what's going on.. | unexpected basic_facilites | {f} | user exit")
else:
raise Exception(f"not sure what's going on.. | unexpected basic_facilites | {f}")
# add None for any basic facilities this listing doesn't offer
for f in self.possible:
room_dict[f] = room_dict.get(f, None)
return room_dict
def find_n_results(self, soup_page):
"""
finds total number of search results from page 1 (of search results)
"""
try:
# keep track of how many results we have
self.n_results = soup_page.find('div', {'class':'_1h559tl'}).text
except:
raise Exception('n results not found on 1st page')
def find_n_pages(self, soup_page, listings_per_page=20):
"""
finds number of existing pages from 1st page of search results
"""
try:
n_results_string = soup_page.find('div', {'class':'_1h559tl'}).text
# check if 300+ club
if '300+' in n_results_string:
self.n_pages = 15
else:
split_results_string = n_results_string.split(' of ')
n_total_results_string = split_results_string[1]
# check for unknown + edge case
if '+' in n_total_results_string:
raise Exception(f'+ in n_total_results_string but 300+ is not\nn_total_results_string == {n_total_results_string}')
else:
# find number of results
split_total_results_string = n_total_results_string.split(' ')
n_total_results = int(split_total_results_string[0])
n_pages = n_total_results / listings_per_page
n_pages = math.ceil(n_pages)
self.n_pages = n_pages
except:
print(f'find_n_pages error | {self.location}')
self.n_pages = 1
# tell me how many pages there are
print(self.n_pages)
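    # Rough worked example of the logic above: a results string such as
    # "1 - 20 of 57 places to stay" gives 57 / 20 = 2.85, rounded up to 3
    # pages, while any "300+" results string is capped at 15 pages
    # (300 / 20 listings per page).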
def make_page_urls(self, base_page, n_pages='auto', listings_per_page=20):
"""
makes pages for search results (sets of 20)
"""
# reset page urls
self.page_urls = []
# if n_pages wasn't set
if n_pages == 'auto':
# find out how many pages there are
self.find_n_pages(base_page, listings_per_page=listings_per_page)
# items_offset is 1st filter (?) or after 1st filter (&)
if '?' not in base_page:
c = '?'
else:
c = '&'
# create page urls
for i in range(self.n_pages):
            # 1st page already done earlier
if i != 0:
url = f'{base_page}{c}items_offset={i * listings_per_page}'
self.page_urls.append(url)
else:
pass
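    # Sketch of the URL pattern produced above (assuming base_page renders as
    # the search URL): with 3 pages and 20 listings per page, the generated
    # offsets are 20 and 40, e.g. ".../homes?items_offset=20" and
    # ".../homes?items_offset=40"; page 1 (offset 0) is assumed to have been
    # fetched already.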
def record_dataset(self, listings, tos, _filter):
"""
take scraped room classes and record their information to csv
tos: time of scrape
> str datetime.datetime.now()
_filter: filter applied to scrape
> str, None if no filter
"""
data = []
for l in listings:
# listing link
a = get_listing_link(l)
# listing title
b = get_listing_title(l)
# top row info
            c,
str(num_in_cat), "MCFLIRT_absolute_displacement_maximum_"+file, "mm", "float", "MCFLIRT absolute displacement maximum for "+file, str(abs_displacement_max))
num_in_cat+=1
write_to_IDP_file(subj, "MCFLIRT_abs_disp_median_"+file, "tvb_IDP_MCFLIRT_disp", str(num_in_cat), "MCFLIRT_absolute_displacement_median_"+file, "mm", "float", "MCFLIRT absolute displacement median for "+file, str(abs_displacement_median))
num_in_cat+=1
write_to_IDP_file(subj, "MCFLIRT_abs_disp_mean_"+file, "tvb_IDP_MCFLIRT_disp", str(num_in_cat), "MCFLIRT_absolute_displacement_mean_"+file, "mm", "float", "MCFLIRT absolute displacement mean for "+file, str(abs_displacement_mean))
num_in_cat+=1
write_to_IDP_file(subj, "MCFLIRT_abs_disp_range_"+file, "tvb_IDP_MCFLIRT_disp", str(num_in_cat), "MCFLIRT_absolute_displacement_range_"+file, "mm", "float", "MCFLIRT absolute displacement range for "+file, str(abs_displacement_range))
num_in_cat+=1
write_to_IDP_file(subj, "MCFLIRT_abs_disp_proportion_gt_one_"+file, "tvb_IDP_MCFLIRT_disp", str(num_in_cat), "MCFLIRT_absolute_displacement_proportion_greaterthan_one_"+file, "proportion out of 1", "float", "MCFLIRT absolute displacement - proportion of time units with displacement greater than 1mm for "+file, str(abs_displacement_proportion_gt_one))
num_in_cat+=1
write_to_IDP_file(subj, "MCFLIRT_abs_disp_num_gt_one_"+file, "tvb_IDP_MCFLIRT_disp", str(num_in_cat), "MCFLIRT_absolute_displacement_number_greaterthan_one_"+file, "time (seconds?)", "float", "MCFLIRT absolute displacement - number of time units with displacement greater than 1mm for "+file, str(abs_displacement_num_gt_one))
num_in_cat+=1
except:
print("ERROR: prefiltered_func_data_mcf file not found")
except:
print("ERROR: no fMRI folder in subject directory")
def homotopic(subj,LUT_txt):
#get indices of homotopic pairs
#get the fc value for each pair
#get distribution of these fc values
#import SC data
LUT = ""
try:
#LUT = np.loadtxt(LUT_txt)
with open(LUT_txt) as f:
LUT = f.read().splitlines()
except:
print("ERROR: LUT file not found")
counter = 0
temp_list=[]
while counter < np.shape(LUT)[0]:
temp_list.append(LUT[counter].split("\t"))
counter +=1
LUT=temp_list
index_pair_list = []
counter = 0
while counter < np.shape(LUT)[0]:
LUT[counter][1]
counter1 = counter + 1
while counter1 < np.shape(LUT)[0]:
if LUT[counter1][1].replace("lh","").replace("LH","").replace("rh","").replace("RH","") == LUT[counter][1].replace("lh","").replace("LH","").replace("rh","").replace("RH",""):
index_pair_list.append((counter,counter1))
#record counter1 and counter as a pair
break
else:
counter1 += 1
counter += 1
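    # At this point index_pair_list holds (i, j) index pairs whose LUT labels
    # match once the lh/LH/rh/RH hemisphere markers are stripped, e.g. a
    # hypothetical pair of labels "lh-precentral" and "rh-precentral" is
    # recorded as the indices of those two rows.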
try:
num_in_cat=1
for file in os.listdir(subj + "/fMRI/"):
if file.endswith(".ica"):
#import FC and TS data
fc_path = os.path.join(subj + "/fMRI/", file, "fc.txt")
ts_path = os.path.join(subj + "/fMRI/", file, "ts.txt")
FC = ""
norm_ts = ""
try:
FC = np.loadtxt(fc_path)
norm_ts = zscore(np.loadtxt(ts_path))
# norm_ts=np.loadtxt(subj + '/fMRI/rfMRI_0.ica/norm_ts.txt');
except:
print("ERROR: fc, ts file not found")
homotopic_sum = 0
for pair in index_pair_list:
homotopic_sum += FC[pair[0]][pair[1]]
homotopic_mean = homotopic_sum/len(index_pair_list)
print("---------")
print("HOMOTOPIC")
print(file)
print("---------")
print (homotopic_mean)
write_to_IDP_file(subj, "FC_homotopic_mean_"+file, "tvb_IDP_homotopic", str(num_in_cat), "FC_homotopic_mean_"+file, "pearson correlation coefficient", "float", "Functional connectivity homotopic mean for "+file, str(homotopic_mean))
num_in_cat +=1
except:
print("ERROR: no fMRI folder in subject directory")
def fmri_SNR_numvol(subj, BB_BIN_DIR):
try:
num_in_cat=1
for file in os.listdir(subj + "/fMRI/"):
if file.endswith(".ica"):
SNR_result = subprocess.run([os.path.join(BB_BIN_DIR, 'tvb_bb_QC/tvb_SNR_IDP_gen.sh'), subj, file, os.path.join(subj, "fMRI", file, "filtered_func_data")], stdout=subprocess.PIPE)
SNR_result = SNR_result.stdout.decode('utf-8').strip()
clean_SNR_result = subprocess.run([os.path.join(BB_BIN_DIR, 'tvb_bb_QC/tvb_SNR_IDP_gen.sh'), subj, file, os.path.join(subj, "fMRI", file, "filtered_func_data_clean")], stdout=subprocess.PIPE)
clean_SNR_result = clean_SNR_result.stdout.decode('utf-8').strip()
numvol_result = subprocess.run([os.path.join(BB_BIN_DIR, 'tvb_bb_QC/tvb_numvol_IDP_gen.sh'), os.path.join(subj, "fMRI", file[:-3]+"nii.gz")], stdout=subprocess.PIPE)
numvol_result = numvol_result.stdout.decode('utf-8').strip()
print("---------")
print(file + "_SNR_num_vol")
print("---------")
print (SNR_result)
print (clean_SNR_result)
print (numvol_result)
write_to_IDP_file(subj, file+"_TSNR", "tvb_IDP_func_TSNR", str(num_in_cat), "QC_"+file+"_tSNR", "ratio", "float", "Temporal signal-to-noise ratio in the pre-processed "+file+" - reciprocal of median (across brain voxels) of voxelwise mean intensity divided by voxelwise timeseries standard deviation", str(SNR_result))
num_in_cat +=1
write_to_IDP_file(subj, file+"_cleaned_TSNR", "tvb_IDP_func_TSNR", str(num_in_cat), "QC_"+file+"_cleaned_tSNR", "ratio", "float", "Temporal signal-to-noise ratio in the artefact-cleaned pre-processed "+file+" - reciprocal of median (across brain voxels) of voxelwise mean intensity divided by voxelwise timeseries standard deviation", str(clean_SNR_result))
num_in_cat +=1
write_to_IDP_file(subj, file+"_num_vol", "tvb_IDP_func_TSNR", str(num_in_cat), "QC_"+file+"_num_vol", "volumes", "int", "Number of volumes in "+file+" scan", str(numvol_result))
num_in_cat +=1
if file.endswith(".feat"):
SNR_result = subprocess.run([os.path.join(BB_BIN_DIR, 'tvb_bb_QC/tvb_SNR_IDP_gen.sh'), subj, file, os.path.join(subj, "fMRI", file, "filtered_func_data")], stdout=subprocess.PIPE)
SNR_result = SNR_result.stdout.decode('utf-8').strip()
numvol_result = subprocess.run([os.path.join(BB_BIN_DIR, 'tvb_bb_QC/tvb_numvol_IDP_gen.sh'), os.path.join(subj, "fMRI", file[:-4]+"nii.gz")], stdout=subprocess.PIPE)
numvol_result = numvol_result.stdout.decode('utf-8').strip()
print("---------")
print(file + "_SNR_num_vol")
print("---------")
print (SNR_result)
print (numvol_result)
write_to_IDP_file(subj, file+"_TSNR", "tvb_IDP_func_TSNR", str(num_in_cat), "QC_"+file+"_tSNR", "ratio", "float", "Temporal signal-to-noise ratio in the pre-processed "+file+" - reciprocal of median (across brain voxels) of voxelwise mean intensity divided by voxelwise timeseries standard deviation", str(SNR_result))
num_in_cat +=1
write_to_IDP_file(subj, file+"_num_vol", "tvb_IDP_func_TSNR", str(num_in_cat), "QC_"+file+"_num_vol", "volumes", "int", "Number of volumes in "+file+" scan", str(numvol_result))
num_in_cat +=1
except:
print("ERROR: fmri SNR or numvol error")
def susceptibility_SNR(subj, BB_BIN_DIR):
try:
num_in_cat=1
susceptibility_mask_gen = subprocess.run([os.path.join(BB_BIN_DIR, 'tvb_bb_QC/tvb_susceptibility_mask_gen.sh'), subj], stdout=subprocess.PIPE)
susceptibility_parc_list=susceptibility_mask_gen.stdout.decode('utf-8').strip().splitlines()
non_susc_mask=susceptibility_parc_list[0]
susc_mask=susceptibility_parc_list[1]
parclist_dict={non_susc_mask:"non-susceptible",susc_mask:"susceptible"}
for susceptibility_parc in susceptibility_parc_list:
for file in os.listdir(subj + "/fMRI/"):
if file.endswith(".ica"):
SNR_result = subprocess.run([os.path.join(BB_BIN_DIR, 'tvb_bb_QC/tvb_susceptibility_SNR_IDP_gen.sh'), subj, os.path.join("fMRI", file, "filtered_func_data"), susceptibility_parc, file, "ica", parclist_dict[susceptibility_parc]], stdout=subprocess.PIPE)
SNR_result = SNR_result.stdout.decode('utf-8').strip()
clean_SNR_result = subprocess.run([os.path.join(BB_BIN_DIR, 'tvb_bb_QC/tvb_susceptibility_SNR_IDP_gen.sh'), subj, os.path.join("fMRI", file, "filtered_func_data_clean"), susceptibility_parc, file, "ica", parclist_dict[susceptibility_parc]], stdout=subprocess.PIPE)
clean_SNR_result = clean_SNR_result.stdout.decode('utf-8').strip()
print("---------")
print(file + "_" + susceptibility_parc + "_susceptibility_SNR")
print("---------")
print (SNR_result)
print (clean_SNR_result)
write_to_IDP_file(subj, file+"_"+parclist_dict[susceptibility_parc]+"_TSNR", "tvb_IDP_func_susceptibility_SNR", str(num_in_cat), "QC_"+file+"_"+parclist_dict[susceptibility_parc]+"_tSNR", "ratio", "float", "Temporal signal-to-noise ratio in the pre-processed "+file+" "+parclist_dict[susceptibility_parc]+" regions - median (across brain voxels) of voxelwise mean intensity divided by voxelwise timeseries standard deviation", str(SNR_result))
num_in_cat +=1
write_to_IDP_file(subj, file+"_"+parclist_dict[susceptibility_parc]+"_cleaned_TSNR", "tvb_IDP_func_susceptibility_SNR", str(num_in_cat), "QC_"+file+"_"+parclist_dict[susceptibility_parc]+"_cleaned_tSNR", "ratio", "float", "Temporal signal-to-noise ratio in the artefact-cleaned pre-processed "+file+" "+parclist_dict[susceptibility_parc]+" regions - median (across brain voxels) of voxelwise mean intensity divided by voxelwise timeseries standard deviation", str(clean_SNR_result))
num_in_cat +=1
if file.endswith(".feat"):
SNR_result = subprocess.run([os.path.join(BB_BIN_DIR, 'tvb_bb_QC/tvb_susceptibility_SNR_IDP_gen.sh'), subj, os.path.join("fMRI", file, "filtered_func_data"), susceptibility_parc, file, "feat", parclist_dict[susceptibility_parc]], stdout=subprocess.PIPE)
SNR_result = SNR_result.stdout.decode('utf-8').strip()
print("---------")
print(file + "_" + susceptibility_parc + "_susceptibility_SNR")
print("---------")
print (SNR_result)
write_to_IDP_file(subj, file+"_"+parclist_dict[susceptibility_parc]+"_TSNR", "tvb_IDP_func_susceptibility_SNR", str(num_in_cat), "QC_"+file+"_"+parclist_dict[susceptibility_parc]+"_tSNR", "ratio", "float", "Temporal signal-to-noise ratio in the pre-processed "+file+" "+parclist_dict[susceptibility_parc]+" regions - median (across brain voxels) of voxelwise mean intensity divided by voxelwise timeseries standard deviation", str(SNR_result))
num_in_cat +=1
except:
print("ERROR: susceptibility SNR error")
def func_head_motion(subj, BB_BIN_DIR):
try:
num_in_cat=1
for file in os.listdir(subj + "/fMRI/"):
if file.endswith(".ica") or file.endswith(".feat"):
head_motion = subprocess.run([os.path.join(BB_BIN_DIR, 'tvb_bb_QC/tvb_IDP_func_head_motion.sh'), subj, os.path.join(subj, "fMRI", file, "mc/prefiltered_func_data_mcf_rel_mean.rms")], stdout=subprocess.PIPE)
head_motion = head_motion.stdout.decode('utf-8').strip()
print("---------")
print(file + "_func_head_motion")
print("---------")
print (head_motion)
write_to_IDP_file(subj, file+"_head_motion", "tvb_IDP_func_head_motion", str(num_in_cat), "IDP_"+file+"_head_motion", "mm", "float", "Mean "+file+" head motion, averaged across space and timepoints", str(head_motion))
num_in_cat +=1
except:
print("ERROR: func_head_motion error")
def func_task_activation(subj, BB_BIN_DIR):
try:
num_in_cat=1
for file in os.listdir(subj + "/fMRI/"):
if file.endswith(".feat"):
task_activation = subprocess.run([os.path.join(BB_BIN_DIR, 'tvb_bb_QC/tvb_IDP_func_task_activation.sh'), subj, file], stdout=subprocess.PIPE)
task_activation = task_activation.stdout.decode('utf-8').strip()
task_activation = task_activation.split(" ")
print("---------")
print(file + "_func_task_activation")
print("---------")
print (task_activation)
write_to_IDP_file(subj, file+"_median_shapes", "tvb_IDP_func_task_activation", str(num_in_cat), "IDP_"+file+"_median_BOLD_shapes", "%", "float", "Median BOLD effect (in group-defined mask) for shapes activation (in task fMRI data)", str(task_activation[0]))
num_in_cat +=1
write_to_IDP_file(subj, file+"_p90_shapes", "tvb_IDP_func_task_activation", str(num_in_cat), "IDP_"+file+"_90th-percentile_BOLD_shapes", "%", "float", "90th percentile of the BOLD effect (in group-defined mask) for shapes activation (in task fMRI data) ", str(task_activation[1]))
num_in_cat +=1
write_to_IDP_file(subj, file+"_median_zstat_shapes", "tvb_IDP_func_task_activation", str(num_in_cat), "IDP_"+file+"_median_zstat_shapes", "Z", "float", "Median z-statistic (in group-defined mask) for shapes activation (in task fMRI data)", str(task_activation[2]))
num_in_cat +=1
write_to_IDP_file(subj, file+"_p90_zstat_shapes", "tvb_IDP_func_task_activation", str(num_in_cat), "IDP_"+file+"_90th-percentile_zstat_shapes", "Z", "float", "90th percentile of the z-statistic (in group-defined mask) for shapes activation (in task fMRI data)", str(task_activation[3]))
num_in_cat +=1
write_to_IDP_file(subj, file+"_median_faces", "tvb_IDP_func_task_activation", str(num_in_cat), "IDP_"+file+"_median_BOLD_faces", "%", "float", "Median BOLD effect (in group-defined mask) for faces activation (in task fMRI data)", str(task_activation[4]))
num_in_cat +=1
write_to_IDP_file(subj, file+"_p90_faces", "tvb_IDP_func_task_activation", str(num_in_cat), "IDP_"+file+"_90th-percentile_BOLD_faces", "%", "float", "90th percentile of the BOLD effect (in group-defined mask) for faces activation (in task fMRI data)", str(task_activation[5]))
num_in_cat +=1
write_to_IDP_file(subj, file+"_median_zstat_faces", "tvb_IDP_func_task_activation", str(num_in_cat), "IDP_"+file+"_median_zstat_faces", "Z", "float", "Median z-statistic (in group-defined mask) for faces activation (in task fMRI data)", str(task_activation[6]))
num_in_cat +=1
write_to_IDP_file(subj, file+"_p90_zstat_faces", "tvb_IDP_func_task_activation", str(num_in_cat), "IDP_"+file+"_90th-percentile_zstat_faces", "Z", "float", "90th percentile of the z-statistic (in group-defined mask) for faces activation (in task fMRI data)", str(task_activation[7]))
num_in_cat +=1
write_to_IDP_file(subj, file+"_median_faces-shapes", "tvb_IDP_func_task_activation", str(num_in_cat), "IDP_"+file+"_median_BOLD_faces-shapes", "%", "float", "Median BOLD effect (in group-defined mask) for faces-shapes contrast (in task fMRI data)", str(task_activation[8]))
num_in_cat +=1
write_to_IDP_file(subj, file+"_p90_faces-shapes", "tvb_IDP_func_task_activation", str(num_in_cat), "IDP_"+file+"_90th-percentile_BOLD_faces-shapes", "%", "float", "90th percentile of the BOLD effect (in group-defined mask) for faces-shapes contrast (in task fMRI data)", str(task_activation[9]))
num_in_cat +=1
write_to_IDP_file(subj, file+"_median_zstat_faces-shapes", "tvb_IDP_func_task_activation", str(num_in_cat), "IDP_"+file+"_median_zstat_faces-shapes", "Z", "float", "Median z-statistic (in group-defined mask) for faces-shapes contrast (in task fMRI data)", str(task_activation[10]))
num_in_cat +=1
write_to_IDP_file(subj, file+"_p90_zstat_faces-shapes", "tvb_IDP_func_task_activation", str(num_in_cat), "IDP_"+file+"_90th-percentile_zstat_faces-shapes", "Z", "float", "90th percentile of the z-statistic (in group-defined mask) for faces-shapes contrast (in task fMRI data)", str(task_activation[11]))
num_in_cat +=1
write_to_IDP_file(subj, file+"_median_faces-shapes_amygdala", "tvb_IDP_func_task_activation", str(num_in_cat), "IDP_"+file+"_median_BOLD_faces-shapes_amygdala", "%", "float", "Median BOLD effect (in group-defined amygdala activation mask) for faces-shapes contrast (in task fMRI data)", str(task_activation[12]))
num_in_cat +=1
write_to_IDP_file(subj, file+"_p90_faces-shapes_amygdala", "tvb_IDP_func_task_activation", str(num_in_cat), "IDP_"+file+"_90th-percentile_BOLD_faces-shapes_amygdala", "%", "float", "90th percentile of the BOLD effect (in group-defined amygdala activation mask) for faces-shapes contrast (in task fMRI data)", str(task_activation[13]))
num_in_cat +=1
write_to_IDP_file(subj, file+"_median_zstat_faces-shapes_amygdala", "tvb_IDP_func_task_activation", str(num_in_cat), "IDP_"+file+"_median_zstat_faces-shapes_amygdala", "Z", "float", "Median z-statistic (in group-defined amygdala activation mask) for faces-shapes contrast (in task fMRI data)", str(task_activation[14]))
num_in_cat +=1
write_to_IDP_file(subj, file+"_p90_zstat_faces-shapes_amygdala", "tvb_IDP_func_task_activation", str(num_in_cat), "IDP_"+file+"_90th-percentile_zstat_faces-shapes_amygdala", "Z", "float", "90th percentile of the z-statistic (in group-defined amygdala activation mask) for faces-shapes contrast (in task fMRI data)", str(task_activation[15]))
num_in_cat +=1
except:
print("ERROR: func_task_activation error")
def all_align_to_T1(subj, BB_BIN_DIR):
try:
num_in_cat=1
baseT2=os.path.join(subj,"T2_FLAIR/T2_FLAIR_brain")
basedMRI=os.path.join(subj,"dMRI/dMRI/data_B0")
baseSWI=os.path.join(subj,"SWI/SWI_TOTAL_MAG_to_T1")
baseDict={baseT2:"T2_FLAIR", basedMRI:"dMRI", baseSWI:"SWI"}
for file in [baseT2, basedMRI, baseSWI]:
            align_to_T1
symbolizer_path = os.path.abspath(os.path.join('src', 'third_party',
'llvm-build', 'Release+Asserts', 'bin', 'llvm-symbolizer'))
disable_sandbox_flag = '--no-sandbox'
if args and 'layout_test_wrapper' in args[0]:
disable_sandbox_flag = '--additional-drt-flag=%s' % disable_sandbox_flag
# Symbolization of sanitizer reports.
if sys.platform in ['win32', 'cygwin']:
# On Windows, the in-process symbolizer works even when sandboxed.
symbolization_options = []
elif options.enable_tsan or options.enable_lsan:
# TSan and LSan are not sandbox-compatible, so we can use online
# symbolization. In fact, they need symbolization to be able to apply
# suppressions.
symbolization_options = ['symbolize=1',
'external_symbolizer_path=%s' % symbolizer_path,
'strip_path_prefix=%s' % options.strip_path_prefix]
elif options.enable_asan or options.enable_msan:
# ASan and MSan use a script for offline symbolization.
# Important note: when running ASan or MSan with leak detection enabled,
# we must use the LSan symbolization options above.
symbolization_options = ['symbolize=0']
# Set the path to llvm-symbolizer to be used by asan_symbolize.py
extra_env['LLVM_SYMBOLIZER_PATH'] = symbolizer_path
options.use_symbolization_script = True
def AddToExistingEnv(env_dict, key, options_list):
# Adds a key to the supplied environment dictionary but appends it to
# existing environment variables if it already contains values.
assert type(env_dict) is dict
assert type(options_list) is list
env_dict[key] = ' '.join(filter(bool, [os.environ.get(key)]+options_list))
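  # For example, if the parent environment already has ASAN_OPTIONS='foo=1',
  # AddToExistingEnv(extra_env, 'ASAN_OPTIONS', ['symbolize=0']) leaves
  # extra_env['ASAN_OPTIONS'] == 'foo=1 symbolize=0'; unset or empty values
  # are dropped by the filter(bool, ...) call.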
# ThreadSanitizer
if options.enable_tsan:
tsan_options = symbolization_options
AddToExistingEnv(extra_env, 'TSAN_OPTIONS', tsan_options)
# Disable sandboxing under TSan for now. http://crbug.com/223602.
args.append(disable_sandbox_flag)
# LeakSanitizer
if options.enable_lsan:
# Symbolization options set here take effect only for standalone LSan.
lsan_options = symbolization_options
AddToExistingEnv(extra_env, 'LSAN_OPTIONS', lsan_options)
# Disable sandboxing under LSan.
args.append(disable_sandbox_flag)
# AddressSanitizer
if options.enable_asan:
asan_options = symbolization_options
if options.enable_lsan:
asan_options += ['detect_leaks=1']
AddToExistingEnv(extra_env, 'ASAN_OPTIONS', asan_options)
# MemorySanitizer
if options.enable_msan:
msan_options = symbolization_options
if options.enable_lsan:
msan_options += ['detect_leaks=1']
AddToExistingEnv(extra_env, 'MSAN_OPTIONS', msan_options)
def main():
"""Entry point for runtest.py.
This function:
(1) Sets up the command-line options.
(2) Sets environment variables based on those options.
(3) Delegates to the platform-specific main functions.
Returns:
Exit code for this script.
"""
option_parser = optparse.OptionParser(usage=USAGE)
  # Since the trailing program to run may have command-line args of its
# own, we need to stop parsing when we reach the first positional argument.
option_parser.disable_interspersed_args()
option_parser.add_option('--target', default='Release',
help='build target (Debug or Release)')
option_parser.add_option('--pass-target', action='store_true', default=False,
help='pass --target to the spawned test script')
option_parser.add_option('--build-dir', help='ignored')
option_parser.add_option('--pass-build-dir', action='store_true',
default=False,
help='pass --build-dir to the spawned test script')
option_parser.add_option('--test-platform',
help='Platform to test on, e.g. ios-simulator')
option_parser.add_option('--total-shards', dest='total_shards',
default=None, type='int',
help='Number of shards to split this test into.')
option_parser.add_option('--shard-index', dest='shard_index',
default=None, type='int',
help='Shard to run. Must be between 1 and '
'total-shards.')
option_parser.add_option('--run-shell-script', action='store_true',
default=False,
                           help='treat first argument as the shell script '
'to run.')
option_parser.add_option('--run-python-script', action='store_true',
default=False,
                           help='treat first argument as a python script '
'to run.')
option_parser.add_option('--generate-json-file', action='store_true',
default=False,
help='output JSON results file if specified.')
option_parser.add_option('--xvfb', action='store_true', dest='xvfb',
default=True,
help='Start virtual X server on Linux.')
option_parser.add_option('--no-xvfb', action='store_false', dest='xvfb',
help='Do not start virtual X server on Linux.')
option_parser.add_option('-o', '--results-directory', default='',
help='output results directory for JSON file.')
option_parser.add_option('--chartjson-file', default='',
help='File to dump chartjson results.')
option_parser.add_option('--log-processor-output-file', default='',
help='File to dump gtest log processor results.')
option_parser.add_option('--builder-name', default=None,
help='The name of the builder running this script.')
option_parser.add_option('--slave-name', default=None,
help='The name of the slave running this script.')
option_parser.add_option('--master-class-name', default=None,
help='The class name of the buildbot master running '
'this script: examples include "Chromium", '
'"ChromiumWebkit", and "ChromiumGPU". The '
'flakiness dashboard uses this value to '
'categorize results. See buildershandler.py '
'in the flakiness dashboard code '
'(use codesearch) for the known values. '
'Defaults to fetching it from '
'slaves.cfg/builders.pyl.')
option_parser.add_option('--build-number', default=None,
                           help=('The build number of the builder running '
'this script.'))
option_parser.add_option('--step-name', default=None,
help=('The name of the step running this script.'))
option_parser.add_option('--test-type', default='',
help='The test name that identifies the test, '
'e.g. \'unit-tests\'')
option_parser.add_option('--test-results-server', default='',
help='The test results server to upload the '
'results.')
option_parser.add_option('--annotate', default='',
help='Annotate output when run as a buildstep. '
'Specify which type of test to parse, available'
' types listed with --annotate=list.')
option_parser.add_option('--parse-input', default='',
help='When combined with --annotate, reads test '
'from a file instead of executing a test '
'binary. Use - for stdin.')
option_parser.add_option('--parse-result', default=0,
help='Sets the return value of the simulated '
'executable under test. Only has meaning when '
'--parse-input is used.')
option_parser.add_option('--results-url', default='',
help='The URI of the perf dashboard to upload '
'results to.')
option_parser.add_option('--perf-dashboard-id', default='',
help='The ID on the perf dashboard to add results '
'to.')
option_parser.add_option('--perf-id', default='',
help='The perf builder id')
option_parser.add_option('--perf-config', default='',
help='Perf configuration dictionary (as a string). '
                                'This allows specifying custom revisions to be '
'the main revision at the Perf dashboard. '
'Example: --perf-config="{\'a_default_rev\': '
'\'r_webrtc_rev\'}"')
option_parser.add_option('--supplemental-columns-file',
default='supplemental_columns',
help='A file containing a JSON blob with a dict '
'that will be uploaded to the results '
'dashboard as supplemental columns.')
option_parser.add_option('--revision',
                           help='The revision number which is used as the '
                                'primary key by the dashboard. If omitted it '
'is automatically extracted from the checkout.')
option_parser.add_option('--webkit-revision',
help='See --revision.')
option_parser.add_option('--enable-asan', action='store_true', default=False,
help='Enable fast memory error detection '
'(AddressSanitizer).')
option_parser.add_option('--enable-lsan', action='store_true', default=False,
help='Enable memory leak detection (LeakSanitizer).')
option_parser.add_option('--enable-msan', action='store_true', default=False,
help='Enable uninitialized memory reads detection '
'(MemorySanitizer).')
option_parser.add_option('--enable-tsan', action='store_true', default=False,
help='Enable data race detection '
'(ThreadSanitizer).')
option_parser.add_option('--strip-path-prefix',
default='build/src/out/Release/../../',
help='Source paths in stack traces will be stripped '
'of prefixes ending with this substring. This '
'option is used by sanitizer tools.')
option_parser.add_option('--no-spawn-dbus', action='store_true',
default=False,
help='Disable GLib DBus bug workaround: '
'manually spawning dbus-launch')
option_parser.add_option('--test-launcher-summary-output',
help='Path to test results file with all the info '
'from the test launcher')
option_parser.add_option('--flakiness-dashboard-server',
help='The flakiness dashboard server to which the '
'results should be uploaded.')
option_parser.add_option('--verbose', action='store_true', default=False,
help='Prints more information.')
chromium_utils.AddPropertiesOptions(option_parser)
options, args = option_parser.parse_args()
# Initialize logging.
log_level = logging.INFO
if options.verbose:
log_level = logging.DEBUG
logging.basicConfig(level=log_level,
format='%(asctime)s %(filename)s:%(lineno)-3d'
' %(levelname)s %(message)s',
datefmt='%y%m%d %H:%M:%S')
logging.basicConfig(level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
if not options.perf_dashboard_id:
options.perf_dashboard_id = options.factory_properties.get('test_name')
options.test_type = options.test_type or options.factory_properties.get(
'step_name', '')
if options.run_shell_script and options.run_python_script:
sys.stderr.write('Use either --run-shell-script OR --run-python-script, '
'not both.')
return 1
print '[Running on builder: "%s"]' % options.builder_name
did_launch_dbus = False
if not options.no_spawn_dbus:
did_launch_dbus = _LaunchDBus()
try:
options.build_dir = build_directory.GetBuildOutputDirectory()
if options.pass_target and options.target:
args.extend(['--target', options.target])
if options.pass_build_dir:
args.extend(['--build-dir', options.build_dir])
    # We will use this to accumulate overrides for the command under test
    # that we may not need or want for other support commands.
extra_env = {}
# This option is used by sanitizer code. There is no corresponding command
# line flag.
options.use_symbolization_script = False
# Set up extra environment and args for sanitizer tools.
_ConfigureSanitizerTools(options, args, extra_env)
# Set the number of shards environment variables.
# NOTE: Chromium's test launcher will ignore these in favor of the command
# line flags passed in _BuildTestBinaryCommand.
if options.total_shards and options.shard_index:
extra_env['GTEST_TOTAL_SHARDS'] = str(options.total_shards)
extra_env['GTEST_SHARD_INDEX'] = str(options.shard_index - 1)
# If perf config is passed via command line, parse the string into a dict.
if options.perf_config:
try:
options.perf_config = ast.literal_eval(options.perf_config)
assert type(options.perf_config) is dict, (
'Value of --perf-config couldn\'t be evaluated into a dict.')
except (exceptions.SyntaxError, ValueError):
option_parser.error('Failed to parse --perf-config value into a dict: '
'%s' % options.perf_config)
return 1
# Allow factory property 'perf_config' as well during a transition period.
options.perf_config = (options.perf_config or
options.factory_properties.get('perf_config'))
if options.results_directory:
options.test_output_xml = os.path.normpath(os.path.abspath(os.path.join(
options.results_directory, '%s.xml' % options.test_type)))
args.append('--gtest_output=xml:' + options.test_output_xml)
elif options.generate_json_file:
option_parser.error(
'--results-directory is required with --generate-json-file=True')
return 1
if options.factory_properties.get('coverage_gtest_exclusions', False):
_BuildCoverageGtestExclusions(options, args)
temp_files = _GetTempCount()
if options.parse_input:
result = _MainParse(options, args)
elif sys.platform.startswith('darwin'):
test_platform = options.factory_properties.get(
'test_platform', options.test_platform)
if test_platform in ('ios-simulator',):
result = _MainIOS(options, args, extra_env)
else:
result = _MainMac(options, args, extra_env)
elif sys.platform == 'win32':
result = _MainWin(options, args, extra_env)
elif sys.platform == 'linux2':
if options.factory_properties.get('test_platform',
options.test_platform) == 'android':
result = _MainAndroid(options, args, extra_env)
else:
result = _MainLinux(options, args, extra_env)
else:
sys.stderr.write('Unknown sys.platform value %s\n' % repr(sys.platform))
return 1
_UploadProfilingData(options, args)
new_temp_files = _GetTempCount()
if temp_files > new_temp_files:
print >> sys.stderr, (
'Confused: %d files were deleted from %s during the test run') % (
(temp_files - new_temp_files), tempfile.gettempdir())
elif temp_files < new_temp_files:
print >> sys.stderr, (
'%d new files were left in %s: Fix the tests to clean up themselves.'
) % ((new_temp_files - temp_files), tempfile.gettempdir())
    # TODO(maruel): Make it an error
#!/usr/bin/env python3
"""Tests the stats.py package."""
import unittest
from linkograph import stats # The package under test.
from linkograph import linkoCreate # For creating linkographs.
import math # For the log function.
from collections import Counter # For Counter data structures.
class Test_totalLinks(unittest.TestCase):
"""Basic unit tests for totalLinks in the stats package."""
def setUp(self):
"""Set up the parameters for the individual tests."""
if self.id().split('.')[-1] == 'test_totalLinks':
self.testParams = [
{'NumberOfNodes': 0,
'ExpectedTotal': 0},
{'NumberOfNodes': 1,
'ExpectedTotal': 0},
{'NumberOfNodes': 2,
'ExpectedTotal': 1},
{'NumberOfNodes': 3,
'ExpectedTotal': 3},
{'NumberOfNodes': 4,
'ExpectedTotal': 6},
{'NumberOfNodes': 5,
'ExpectedTotal': 10},
{'NumberOfNodes': 6,
'ExpectedTotal': 15}]
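            # The expected totals follow n*(n-1)/2, i.e. the number of links
            # in a fully connected linkograph on n nodes.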
def performTestForParams(self):
""""Performs the tests for each set of parameters."""
for params in self.testParams:
actual = stats.totalLinks(params['NumberOfNodes'])
self.assertEqual(
actual,
params['ExpectedTotal'],
("Test fail: Number of Nodes = {}"
" Actual = {}"
" ExpectedTotal = {}")
.format(params['NumberOfNodes'],
actual,
params['ExpectedTotal']))
def test_totalLinks(self):
"""Tests for the correct number of total links."""
self.performTestForParams()
class Test_totalLinkographs(unittest.TestCase):
"""Basic unit tests for totalLinkographs in the stats package."""
def setUp(self):
"""Set up the parameters for the individual tests."""
if self.id().split('.')[-1] == 'test_totalLinkographs':
self.testParams = [
{'NumberOfNodes': 0,
'ExpectedTotal': 1},
{'NumberOfNodes': 1,
'ExpectedTotal': 1},
{'NumberOfNodes': 2,
'ExpectedTotal': 2},
{'NumberOfNodes': 3,
'ExpectedTotal': 8},
{'NumberOfNodes': 4,
'ExpectedTotal': 2**6},
{'NumberOfNodes': 5,
'ExpectedTotal': 2**10},
{'NumberOfNodes': 6,
'ExpectedTotal': 2**15}]
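            # The expected totals follow 2**(n*(n-1)/2): each of the
            # n*(n-1)/2 possible links is independently present or absent.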
def performTestForParams(self):
""""Performs the tests for each set of parameters."""
for params in self.testParams:
actual = stats.totalLinkographs(params['NumberOfNodes'])
self.assertEqual(
actual,
params['ExpectedTotal'],
("Test fail: Number of Nodes = {}"
" Actual = {}"
" ExpectedTotal = {}")
.format(params['NumberOfNodes'],
actual,
params['ExpectedTotal']))
def test_totalLinkographs(self):
"""Tests for the correct number of total links."""
self.performTestForParams()
class Test_totalLabels(unittest.TestCase):
"""Basic unit tests for totalLabels in the stats package."""
def setUp(self):
"""Set up the parameters for the individual tests."""
singleLabels = linkoCreate.Linkograph(
[({'A'}, set(), {1,2,3}),
({'D'}, {0}, {3,4}),
({'A'}, {0}, {4}),
({'C'}, {0,1}, {4}),
({'A'}, {1,2,3}, set())],
['A', 'B', 'C', 'D'])
simpleLinko = linkoCreate.Linkograph(
[({'A', 'B', 'C'}, set(), {1,2,3}),
({'D'}, {0}, {3,4}),
({'A'}, {0}, {4}),
({'B', 'C'}, {0,1}, {4}),
({'A'}, {1,2,3}, set())],
['A', 'B', 'C', 'D'])
if self.id().split('.')[-1] == 'test_singleLabelPerLine':
self.testParams = [
{'Linkograph': singleLabels,
'lowerBound': None,
'upperBound': None,
'ExpectedCount': Counter({'A': 3,
'C': 1,
'D': 1})},
{'Linkograph': singleLabels,
'lowerBound': -1,
'upperBound': None,
'ExpectedCount': Counter({'A': 3,
'C': 1,
'D': 1})},
{'Linkograph': singleLabels,
'lowerBound': 0,
'upperBound': None,
'ExpectedCount': Counter({'A': 3,
'C': 1,
'D': 1})},
{'Linkograph': singleLabels,
'lowerBound': 1,
'upperBound': None,
'ExpectedCount': Counter({'A': 2,
'C': 1,
'D': 1})},
{'Linkograph': singleLabels,
'lowerBound': None,
'upperBound': 5,
'ExpectedCount': Counter({'A': 3,
'C': 1,
'D': 1})},
{'Linkograph': singleLabels,
'lowerBound': None,
'upperBound': 4,
'ExpectedCount': Counter({'A': 3,
'C': 1,
'D': 1})},
{'Linkograph': singleLabels,
'lowerBound': None,
'upperBound': 3,
'ExpectedCount': Counter({'A': 2,
'C': 1,
'D': 1})},
{'Linkograph': singleLabels,
'lowerBound': -1,
'upperBound': 5,
'ExpectedCount': Counter({'A': 3,
'C': 1,
'D': 1})},
{'Linkograph': singleLabels,
'lowerBound': 0,
'upperBound': 4,
'ExpectedCount': Counter({'A': 3,
'C': 1,
'D': 1})},
{'Linkograph': singleLabels,
'lowerBound': 1,
'upperBound': 3,
'ExpectedCount': Counter({'A': 1,
'C': 1,
'D': 1})},
{'Linkograph': singleLabels,
'lowerBound': 1,
'upperBound': 2,
'ExpectedCount': Counter({'A': 1,
'D': 1})},
{'Linkograph': singleLabels,
'lowerBound': 2,
'upperBound': 2,
'ExpectedCount': Counter({'A': 1})},
{'Linkograph': singleLabels,
'lowerBound': 2,
'upperBound': 1,
'ExpectedCount': Counter({})}
]
if self.id().split('.')[-1] == 'test_multipleLabelPerLine':
self.testParams = [
{'Linkograph': simpleLinko,
'lowerBound': -1,
'upperBound': None,
'ExpectedCount': Counter({'A': 3,
'B': 2,
'C': 2,
'D': 1})},
{'Linkograph': simpleLinko,
'lowerBound': 0,
'upperBound': None,
'ExpectedCount': Counter({'A': 3,
'B': 2,
'C': 2,
'D': 1})},
{'Linkograph': simpleLinko,
'lowerBound': 1,
'upperBound': None,
'ExpectedCount': Counter({'A': 2,
'B': 1,
'C': 1,
'D': 1})},
{'Linkograph': simpleLinko,
'lowerBound': None,
'upperBound': 5,
'ExpectedCount': Counter({'A': 3,
'B': 2,
'C': 2,
'D': 1})},
{'Linkograph': simpleLinko,
'lowerBound': None,
'upperBound': 4,
'ExpectedCount': Counter({'A': 3,
'B': 2,
'C': 2,
'D': 1})},
{'Linkograph': simpleLinko,
'lowerBound': None,
'upperBound': 3,
'ExpectedCount': Counter({'A': 2,
'B': 2,
'C': 2,
'D': 1})},
{'Linkograph': simpleLinko,
'lowerBound': 1,
'upperBound': 3,
'ExpectedCount': Counter({'A': 1,
'B': 1,
'C': 1,
'D': 1})}
]
def performTestForParams(self):
""""Performs the tests for each set of parameters."""
for params in self.testParams:
actualCount = stats.totalLabels(params['Linkograph'],
params['lowerBound'],
params['upperBound'])
self.assertEqual(
actualCount,
params['ExpectedCount'],
("Test fail: linkograph {}"
" lowerBound = {}"
" upperBound = {}"
" actualCount = {}"
" ExpectedCount = {}")
.format(params['Linkograph'],
params['lowerBound'],
params['upperBound'],
actualCount,
params['ExpectedCount']))
def test_singleLabelPerLine(self):
"""Tests for correct number of labels with one label per line."""
self.performTestForParams()
def test_multipleLabelPerLine(self):
"""Test for correct number of labels with multiple labels."""
self.performTestForParams()
class Test_percentageOfEntries(unittest.TestCase):
"""Basic unit tests for percentageOfEntries in the stats package."""
def setUp(self):
"""Set up the parameters for the individual tests."""
singleLabels = linkoCreate.Linkograph(
[({'A'}, set(), {1,2,3}),
({'D'}, {0}, {3,4}),
({'A'}, {0}, {4}),
({'C'}, {0,1}, {4}),
({'A'}, {1,2,3}, set())],
['A', 'B', 'C', 'D'])
simpleLinko = linkoCreate.Linkograph(
[({'A', 'B', 'C'}, set(), {1,2,3}),
({'D'}, {0}, {3,4}),
({'A'}, {0}, {4}),
({'B', 'C'}, {0,1}, {4}),
({'A'}, {1,2,3}, set())],
['A', 'B', 'C', 'D'])
if self.id().split('.')[-1] == 'test_singleLabelPerLine':
self.testParams = [
{'Linkograph': singleLabels,
'lowerBound': -1,
'upperBound': None,
'ExpectedPercentage': {'A': 3/5,
'C': 1/5,
'D': 1/5}},
{'Linkograph': singleLabels,
'lowerBound': 0,
'upperBound': None,
'ExpectedPercentage': {'A': 3/5,
'C': 1/5,
'D': 1/5}},
{'Linkograph': singleLabels,
'lowerBound': 1,
'upperBound': None,
'ExpectedPercentage': {'A': 2/4,
'C': 1/4,
'D': 1/4}},
{'Linkograph': singleLabels,
'lowerBound': None,
'upperBound': 5,
'ExpectedPercentage': {'A': 3/5,
'C': 1/5,
'D': 1/5}},
{'Linkograph': singleLabels,
'lowerBound': None,
'upperBound': 4,
'ExpectedPercentage': {'A': 3/5,
'C': 1/5,
'D': 1/5}},
{'Linkograph': singleLabels,
'lowerBound': None,
'upperBound': 3,
'ExpectedPercentage': {'A': 2/4,
'C': 1/4,
'D': 1/4}},
{'Linkograph': singleLabels,
'lowerBound': 1,
'upperBound': 3,
'ExpectedPercentage': {'A': 1/3,
'C': 1/3,
'D': 1/3}},
{'Linkograph': singleLabels,
'lowerBound': 3,
'upperBound': 3,
'ExpectedPercentage': {'C': 1}},
{'Linkograph': singleLabels,
'lowerBound': 4,
'upperBound': 3,
'ExpectedPercentage': {}}
]
if self.id().split('.')[-1] == 'test_multipleLabelPerLine':
self.testParams = [
{'Linkograph': simpleLinko,
'lowerBound': -1,
'upperBound': None,
'ExpectedPercentage': {'A': 3/5,
'B': 2/5,
'C': 2/5,
'D': 1/5}},
{'Linkograph': simpleLinko,
'lowerBound': 0,
'upperBound': None,
'ExpectedPercentage': {'A': 3/5,
'B': 2/5,
'C': 2/5,
'D': 1/5}},
{'Linkograph': simpleLinko,
'lowerBound': 1,
'upperBound': None,
'ExpectedPercentage': {'A': 2/4,
'B': 1/4,
'C': 1/4,
'D': 1/4}},
{'Linkograph': simpleLinko,
'lowerBound': None,
'upperBound': 5,
'ExpectedPercentage': {'A': 3/5,
'B': 2/5,
'C': 2/5,
'D': 1/5}},
{'Linkograph': simpleLinko,
'lowerBound': None,
'upperBound': 4,
'ExpectedPercentage': {'A': 3/5,
'B': 2/5,
'C': 2/5,
'D': 1/5}},
{'Linkograph': simpleLinko,
'lowerBound': None,
'upperBound': 3,
'ExpectedPercentage': {'A': 2/4,
'B': 2/4,
'C': 2/4,
'D': 1/4}},
{'Linkograph': simpleLinko,
'lowerBound': 1,
'upperBound': 3,
'ExpectedPercentage': {'A': 1/3,
'B': 1/3,
'C': 1/3,
'D': 1/3}},
{'Linkograph': simpleLinko,
'lowerBound': 2,
'upperBound': 1,
'ExpectedPercentage': {}}
]
def performTestForParams(self):
""""Performs the tests for each set of parameters."""
for params in self.testParams:
actualPercentage = stats.percentageOfEntries(
params['Linkograph'],
params['lowerBound'],
params['upperBound'])
self.assertEqual(
actualPercentage,
params['ExpectedPercentage'],
("Test fail: linkograph {}"
" lowerBound = {}"
" upperBound = {}"
" actualPercentage = {}"
" ExpectedPercentage = {}")
.format(params['Linkograph'],
params['lowerBound'],
params['upperBound'],
actualPercentage,
params['ExpectedPercentage']))
def test_singleLabelPerLine(self):
"""Tests for correct percentage for linkograph with one label per line."""
self.performTestForParams()
def test_multipleLabelPerLine(self):
"""Test for correct percentage for linkograph with multiple labels."""
self.performTestForParams()
class Test_links(unittest.TestCase):
"""Basic unit tests for links in the stats package."""
def setUp(self):
"""Set up the parameters for the individual tests."""
singleLabels = linkoCreate.Linkograph(
[({'A'}, set(), {1,2,3}),
({'D'}, {0}, {3,4}),
({'A'}, {0}, {4}),
({'C'}, {0,1}, {4}),
({'A'}, {1,2,3}, set())],
['A', 'B', 'C', 'D'])
simpleLinko = linkoCreate.Linkograph(
[({'A', 'B', 'C'}, set(), {1,2,3}),
({'D'}, {0}, {3,4}),
({'A'}, {0}, {4}),
({'B', 'C'}, {0,1}, {4}),
({'A'}, {1,2,3}, set())],
['A', 'B', 'C', 'D'])
if self.id().split('.')[-1] == 'test_singleLabelPerLine':
self.testParams = [
{'Linkograph': singleLabels,
'lowerBound': -1,
'upperBound': None,
'ExpectedLinks': 7},
{'Linkograph': singleLabels,
'lowerBound': 0,
'upperBound': None,
'ExpectedLinks': 7},
{'Linkograph': singleLabels,
'lowerBound': 1,
'upperBound': None,
'ExpectedLinks': 4},
{'Linkograph': singleLabels,
'lowerBound': None,
'upperBound': 5,
'ExpectedLinks': 7},
{'Linkograph': singleLabels,
'lowerBound': None,
'upperBound': 4,
'ExpectedLinks': 7},
{'Linkograph': singleLabels,
'lowerBound': None,
'upperBound': 3,
'ExpectedLinks': 4},
{'Linkograph': singleLabels,
'lowerBound': 1,
'upperBound': 3,
'ExpectedLinks': 1},
{'Linkograph': singleLabels,
'lowerBound': 2,
'upperBound': 3,
'ExpectedLinks': 0},
{'Linkograph': singleLabels,
'lowerBound': 3,
'upperBound': 2,
'ExpectedLinks': 0},
]
if self.id().split('.')[-1] == 'test_multipleLabelPerLine':
self.testParams = [
{'Linkograph': simpleLinko,
'lowerBound': -1,
'upperBound': None,
'ExpectedLinks': 7},
{'Linkograph': simpleLinko,
'lowerBound': 0,
'upperBound': None,
'ExpectedLinks': 7},
{'Linkograph': simpleLinko,
'lowerBound': 1,
'upperBound': None,
'ExpectedLinks': 4},
{'Linkograph': simpleLinko,
'lowerBound': None,
'upperBound': 5,
'ExpectedLinks': 7},
{'Linkograph': simpleLinko,
'lowerBound': None,
'upperBound': 4,
'ExpectedLinks': 7},
{'Linkograph': simpleLinko,
'lowerBound': None,
'upperBound': 3,
'ExpectedLinks': 4},
{'Linkograph': simpleLinko,
'lowerBound': 1,
'upperBound': 3,
'ExpectedLinks': 1},
{'Linkograph': simpleLinko,
'lowerBound': 2,
'upperBound': 3,
'ExpectedLinks': 0},
{'Linkograph': simpleLinko,
'lowerBound': 3,
'upperBound': 2,
'ExpectedLinks': 0},
]
def performTestForParams(self):
""""Performs the tests for each set of parameters."""
for params in | |
sc_left = sc_idx == 0
block_idx = 0 if sc_idx == 0 else np.max(np.where(np.array([b[0] for b in cv.blocks]) < sc_idx)[0])
block = cv.blocks[block_idx][1]
cv.pos = int(block[0]) + 1 if sc_left else int(block[1])
rcigar = read.cigar[::-1] if cv.cstrand == '-' else read.cigar
idx = len(rcigar) - sc_idx - 1 if cv.cstrand == '-' else sc_idx + 1
cv.cpos = sum([v for c,v in rcigar[:idx] if c in constants.AFFECT_CONTIG])
varseq = read.query_sequence[cv.cpos:(cv.cpos+cv.cvsize)]
refseq = read.get_reference_sequence()
cv.ref = refseq[0] if sc_left else refseq[-1]
cv.alt = '%s]%s:%d]%s' % (varseq, cv.chrom, cv.pos-1, cv.ref) if sc_left else \
'%s[%s:%d[%s' % (cv.ref, cv.chrom, cv.pos+1, varseq)
cv.vid = get_next_id(read.query_name)
CrypticVariant.write_contig_info(ci_file, cv)
print(cv.vcf_output())
def annotate_block_right(cv, read, cpos, olapping, block, block_idx):
'''
extended exon is to the *right* of the reference sequence
'''
qseq, rseq = bh.get_block_sequence(read, block_idx)
seq_left_pos = block[1] - max(olapping.end)
cv.ref, cv.alt = rseq[(-seq_left_pos):], ']' + qseq[(-seq_left_pos):]
cv.cpos, cv.pos = cpos, max(olapping.end) + 1
cv.vsize, cv.cvsize = abs(len(cv.alt)-1 - len(cv.ref)), len(cv.alt)-1
return cv
def annotate_block_left(cv, read, cpos, olapping, block, block_idx):
'''
extended exon is to the *left* of the reference sequence
'''
qseq, rseq = bh.get_block_sequence(read, block_idx)
seq_right_pos = min(olapping.start) - block[0]
cv.ref, cv.alt = rseq[:seq_right_pos], qseq[:seq_right_pos] + '['
cv.cpos, cv.pos = cpos, min(olapping.start) - len(cv.ref) + 1
cv.vsize, cv.cvsize = abs(len(cv.alt)-1 - len(cv.ref)), len(cv.alt)-1
return cv
def annotate_blocks(cv, read, chr_ref, ci_file):
'''
Annotate any sequence that is outside of exonic regions
'''
cv.parid = '.' # blocks don't have pairs
novel_blocks = [(idx, block) for idx, block in cv.blocks if bh.is_novel_block(block, chr_ref, MIN_CLIP)]
rcigar = read.cigar[::-1] if cv.cstrand == '-' else read.cigar
for block_idx, block in novel_blocks:
idx = len(rcigar) - block_idx - 1 if cv.cstrand == '-' else block_idx
cpos1 = sum([v for c,v in rcigar[:idx] if c in constants.AFFECT_CONTIG])
cpos2 = sum([v for c,v in rcigar[:idx+1] if c in constants.AFFECT_CONTIG])
# whether sequence block is overlapping, or on left or right side of contig block
olapping = chr_ref[np.logical_and(block[0] < chr_ref.start, block[1] > chr_ref.end)]
left = chr_ref[np.logical_and(block[1] > chr_ref.start, block[1] <= chr_ref.end)]
right = chr_ref[np.logical_and(block[0] >= chr_ref.start, block[0] < chr_ref.end)]
if len(left) > 0 and len(right) > 0:
# retained intron
cv.cvtype = 'RI'
qseq, rseq = bh.get_block_sequence(read, block_idx)
seq_right_pos = block[1] - min(left.start)
seq_left_pos = max(right.end) - block[0]
cv.pos = block[0] + seq_left_pos + 1
cv.ref = rseq[seq_left_pos:(-seq_right_pos)]
cv.alt = ']' + qseq[seq_left_pos:(-seq_right_pos)] + '['
cv.cpos = cpos1 + seq_left_pos
cv.vsize, cv.cvsize = abs(len(cv.alt)-2 - len(cv.ref)), len(cv.alt)-2
cv.vid = get_next_id(read.query_name)
elif len(olapping) > 0:
# annotate left side
cv.cvtype = 'EE'
cv = annotate_block_left(cv, read, cpos2, olapping, block, block_idx)
cv.vid = get_next_id(read.query_name)
print(cv.vcf_output())
CrypticVariant.write_contig_info(ci_file, cv)
# annotate right side
cv = annotate_block_right(cv, read, cpos1, olapping, block, block_idx)
cv.vid = get_next_id(read.query_name)
elif len(left) > 0:
# annotate left side
cv.cvtype = 'EE'
cv = annotate_block_left(cv, read, cpos2, left, block, block_idx)
cv.vid = get_next_id(read.query_name)
elif len(right) > 0:
# annotate right side
cv.cvtype = 'EE'
cv = annotate_block_right(cv, read, cpos1, right, block, block_idx)
cv.vid = get_next_id(read.query_name)
else:
# block does not cross any annotation
qseq, rseq = bh.get_block_sequence(read, block_idx)
cv.ref, cv.alt = rseq, '[' + qseq + ']'
cv.pos, cv.cvtype = block[0] + 1, 'NE'
cv.cpos = cpos1
cv.vid = get_next_id(read.query_name)
cv.vsize, cv.cvsize = abs(len(cv.alt)-2 - len(cv.ref)), len(cv.alt)-2
print(cv.vcf_output())
CrypticVariant.write_contig_info(ci_file, cv)
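# Summary of the branch logic in annotate_blocks above (descriptive only):
#   len(left) > 0 and len(right) > 0 -> 'RI'  block runs from inside one annotated region into another (retained intron)
#   len(olapping) > 0                -> 'EE'  block spans a whole annotated region and extends on both sides
#   len(left) > 0 or len(right) > 0  -> 'EE'  block extends an annotated region on one side only
#   otherwise                        -> 'NE'  block overlaps no annotation (novel exon)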
def annotate_fusion(args, read, juncs, bam_idx, ex_ref, ref_trees, outbam):
try:
r1, r2 = bam_idx.find(read.query_name)
except ValueError:
logging.info('WARNING: did not find exactly 2 reads matching hard-clipped read %s; cannot process' % read.query_name)
return
ci_file = args.contig_info_output
cv1 = CrypticVariant().from_read(r1)
cv2 = CrypticVariant().from_read(r2)
cv1.cvtype, cv2.cvtype = 'FUS', 'FUS'
cv1.genes = get_overlapping_genes(r1, ref_trees)
cv2.genes = get_overlapping_genes(r2, ref_trees)
if cv1.genes == cv2.genes:
# intra-genic rearrangement
cv1.cvtype, cv2.cvtype = 'IGR', 'IGR'
if cv1.genes == '' and cv2.genes == '':
# no intersecting gene, this is not an interesting fusion
logging.info('No gene(s) intersecting candidate fusion contig %s; skipping' % read.query_name)
record[read.query_name] = []
return
hc_idx1 = [idx for idx, clip in enumerate(r1.cigar) if clip[0] == constants.CIGAR['hard-clip']][0]
hc_left1 = hc_idx1 == 0
block_idx1 = 0 if hc_left1 else np.max(np.where(np.array([b[0] for b in cv1.blocks]) < hc_idx1)[0])
block1 = cv1.blocks[block_idx1][1]
cv1.pos = int(block1[0]) if hc_left1 else int(block1[1])
hc_idx2 = [idx for idx, clip in enumerate(r2.cigar) if clip[0] == constants.CIGAR['hard-clip']][0]
hc_left2 = hc_idx2 == 0
block_idx2 = 0 if hc_left2 else np.max(np.where(np.array([b[0] for b in cv2.blocks]) < hc_idx2)[0])
block2 = cv2.blocks[block_idx2][1]
cv2.pos = int(block2[0]) if hc_left2 else int(block2[1])
# TODO: handle inserted sequence between the fusion breakpoints
varseq1, varseq2 = '', ''
refseq1 = r1.get_reference_sequence()
refseq2 = r2.get_reference_sequence()
bracket_dir1 = '[' if r1.is_reverse == r2.is_reverse else ']'
bracket_dir2 = ']' if r1.is_reverse == r2.is_reverse else ']'
cv1.ref = refseq1[0] if hc_left1 else refseq1[-1]
cv2.ref = refseq2[0] if hc_left2 else refseq2[-1]
if r1.is_reverse == r2.is_reverse:
cv1.alt = '%s]%s:%d]%s' % (varseq1, cv2.chrom, cv2.pos-1, cv1.ref) if hc_left1 else \
'%s[%s:%d[%s' % (cv1.ref, cv2.chrom, cv2.pos+1, varseq1)
cv2.alt = '%s]%s:%d]%s' % (varseq2, cv1.chrom, cv1.pos-1, cv2.ref) if hc_left2 else \
'%s[%s:%d[%s' % (cv2.ref, cv1.chrom, cv1.pos+1, varseq2)
else:
# contigs align on opposite strands
cv1.alt = '%s[%s:%d[%s' % (varseq1, cv2.chrom, cv2.pos-1, cv1.ref) if hc_left1 else \
'%s]%s:%d]%s' % (cv1.ref, cv2.chrom, cv2.pos+1, varseq1)
cv2.alt = '%s[%s:%d[%s' % (varseq2, cv1.chrom, cv1.pos-1, cv2.ref) if hc_left2 else \
'%s]%s:%d]%s' % (cv2.ref, cv1.chrom, cv1.pos+1, varseq2)
cv1.vid = get_next_id(r1.query_name)
cv2.vid = get_next_id(r2.query_name)
cv1.parid, cv2.parid = cv2.vid, cv1.vid
# set cpos as the location of clip on the contig
rcigar = r1.cigar[::-1] if cv1.cstrand == '-' else r1.cigar
idx = len(rcigar) - hc_idx1 - 1 if cv1.cstrand == '-' else hc_idx1 + 1
cv1.cpos = sum([v for c,v in rcigar[:idx] if c in constants.AFFECT_CONTIG])
print(cv1.vcf_output())
print(cv2.vcf_output())
outbam.write(r1)
outbam.write(r2)
CrypticVariant.write_contig_info(ci_file, cv1, cv2)
annotate_single_read(args, r1, juncs, ex_ref, ref_trees, genes=cv1.genes)
annotate_single_read(args, r2, juncs, ex_ref, ref_trees, genes=cv2.genes)
def annotate_juncs(cv, read, locs, novel_juncs, ci_file):
cv.cvsize = 0
for junc in novel_juncs:
pos1, pos2 = int(junc[1]), int(junc[2])
junc_idx = [idx for idx, block in cv.blocks if block[1] == pos1][0]
junc_type = read.cigar[junc_idx+1][0]
if junc_type in constants.GAPS or junc_type == constants.CIGAR['soft-clip']:
continue
cp = CrypticVariant().from_read(read) # partner variant
cp.genes = cv.genes
varseq, refseq = '', read.get_reference_sequence()
rcigar = read.cigar[::-1] if cv.cstrand == '-' else read.cigar
idx = len(rcigar) - junc_idx - 1 if cv.cstrand == '-' else junc_idx + 1
cpos = sum([v for c,v in rcigar[:idx] if c in constants.AFFECT_CONTIG])
cv.cpos, cp.cpos = cpos, cpos
cv.pos, cp.pos = pos1, pos2+1
rpos = sum([v for c,v in read.cigar[:(junc_idx+1)] if c in constants.AFFECT_REF])
cv.ref, cp.ref = refseq[rpos-1], refseq[rpos]
cv.alt = '%s[%s:%d[%s' % (cv.ref, cv.chrom, cv.pos, varseq)
cp.alt = '%s]%s:%d]%s' % (varseq, cp.chrom, cp.pos, cp.ref)
cv.vid = get_next_id(read.query_name)
cp.vid = get_next_id(read.query_name)
cv.parid, cp.parid = cp.vid, cv.vid
loc_left = '%s:%d' % (cv.chrom, pos1)
loc_right = '%s:%d' % (cv.chrom, pos2)
if not (loc_left in locs) and not (loc_right in locs):
# neither end annotated, novel exon junction
cv.cvtype, cp.cvtype = 'NEJ', 'NEJ'
elif not (loc_left in locs and loc_right in locs):
# one end unannotated, partial novel junction
cv.cvtype, cp.cvtype = 'PNJ', 'PNJ'
else:
# both ends annotated, alternative splice site
cv.cvtype, cp.cvtype = 'AS', 'AS'
print(cv.vcf_output())
print(cp.vcf_output())
CrypticVariant.write_contig_info(ci_file, cv, cp)
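# Summary of the junction classification in annotate_juncs above (descriptive only):
#   neither junction end found in `locs`  -> 'NEJ' (novel exon junction)
#   exactly one end found in `locs`       -> 'PNJ' (partial novel junction)
#   both ends found in `locs`             -> 'AS'  (alternative splice site)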
def annotate_single_read(args, read, juncs, ex_ref, ref_trees, outbam=None, genes=''):
'''
Annotate insertions, deletions and soft-clips on a single read
'''
ci_file = args.contig_info_output
genes = get_overlapping_genes(read, ref_trees) if genes == '' else genes
fusion = any([op == constants.CIGAR['hard-clip'] and val >= MIN_CLIP for op, val in read.cigar])
if genes == '' and not fusion:
logging.info('No gene(s) intersecting read %s; skipping' % read.query_name)
return
# check for contig gaps or soft-clips
has_gaps = any([op in constants.GAPS and val >= MIN_GAP for op, val in read.cigar])
has_scs = any([op == constants.CIGAR['soft-clip'] and val >= MIN_CLIP for op, val in read.cigar])
is_spliced = any([op == constants.CIGAR['skipped'] for op, val in read.cigar])
# check junctions
tx_juncs = get_tx_juncs(read)
unknown_juncs = ['%s:%s-%s' % (c, s, e) not in juncs[0] for c, s, e in tx_juncs]
has_novel_juncs = any(unknown_juncs)
# check for novel blocks
chr_ref = get_chr_ref(read, ex_ref)
has_novel_blocks = any([bh.is_novel_block(block, chr_ref, MIN_CLIP) for block in read.get_blocks()])
if has_gaps or has_scs or has_novel_juncs or has_novel_blocks:
cv = CrypticVariant().from_read(read)
cv.genes = genes
if | |
self.xDataAvailable = 0
self.yDataAvailable = 0
self.zDataAvailable = 0
self.zyxDataAvailable = 0
self.xDataOverrun = 0
self.yDataOverrun = 0
self.zDataOverrun = 0
self.zyxDataOverrun = 0
def read(self):
self._manager.read(self)
return self
def getValue(self):
return ((self.xDataAvailable & 0x1) << 0) | ((self.yDataAvailable & 0x1) << 1) | ((self.zDataAvailable & 0x1) << 2) | ((self.zyxDataAvailable & 0x1) << 3) | ((self.xDataOverrun & 0x1) << 4) | ((self.yDataOverrun & 0x1) << 5) | ((self.zDataOverrun & 0x1) << 6) | ((self.zyxDataOverrun & 0x1) << 7)
def setValue(self, value: int):
self.xDataAvailable = ((value >> 0) & 0x1)
self.yDataAvailable = ((value >> 1) & 0x1)
self.zDataAvailable = ((value >> 2) & 0x1)
self.zyxDataAvailable = ((value >> 3) & 0x1)
self.xDataOverrun = ((value >> 4) & 0x1)
self.yDataOverrun = ((value >> 5) & 0x1)
self.zDataOverrun = ((value >> 6) & 0x1)
self.zyxDataOverrun = ((value >> 7) & 0x1)
def __str__(self):
retVal = ""
retVal += "XDataAvailable: {} (offset: 0, width: 1)\r\n".format(self.xDataAvailable)
retVal += "YDataAvailable: {} (offset: 1, width: 1)\r\n".format(self.yDataAvailable)
retVal += "ZDataAvailable: {} (offset: 2, width: 1)\r\n".format(self.zDataAvailable)
retVal += "ZyxDataAvailable: {} (offset: 3, width: 1)\r\n".format(self.zyxDataAvailable)
retVal += "XDataOverrun: {} (offset: 4, width: 1)\r\n".format(self.xDataOverrun)
retVal += "YDataOverrun: {} (offset: 5, width: 1)\r\n".format(self.yDataOverrun)
retVal += "ZDataOverrun: {} (offset: 6, width: 1)\r\n".format(self.zDataOverrun)
retVal += "ZyxDataOverrun: {} (offset: 7, width: 1)\r\n".format(self.zyxDataOverrun)
return retVal
class FifoControlRegister(Register):
def __init__(self, reg_manager: RegisterManager):
Register.__init__(self, reg_manager, 0x2E, 1, False)
self.fifoThreshold = 0
self.fifoMode = 0
def read(self):
self._manager.read(self)
return self
def getValue(self):
return ((self.fifoThreshold & 0x1F) << 0) | ((self.fifoMode & 0x7) << 5)
def setValue(self, value: int):
self.fifoThreshold = ((value >> 0) & 0x1F)
self.fifoMode = ((value >> 5) & 0x7)
def __str__(self):
retVal = ""
retVal += "FifoThreshold: {} (offset: 0, width: 5)\r\n".format(self.fifoThreshold)
retVal += "FifoMode: {} (offset: 5, width: 3)\r\n".format(self.fifoMode)
return retVal
class FifoSourceRegister(Register):
def __init__(self, reg_manager: RegisterManager):
Register.__init__(self, reg_manager, 0x2f, 1, False)
self.fifoStoredLevel = 0
self.empty = 0
self.overrun = 0
self.fifoThreshold = 0
def read(self):
self._manager.read(self)
return self
def getValue(self):
return ((self.fifoStoredLevel & 0x1F) << 0) | ((self.empty & 0x1) << 5) | ((self.overrun & 0x1) << 6) | ((self.fifoThreshold & 0x1) << 7)
def setValue(self, value: int):
self.fifoStoredLevel = ((value >> 0) & 0x1F)
self.empty = ((value >> 5) & 0x1)
self.overrun = ((value >> 6) & 0x1)
self.fifoThreshold = ((value >> 7) & 0x1)
def __str__(self):
retVal = ""
retVal += "FifoStoredLevel: {} (offset: 0, width: 5)\r\n".format(self.fifoStoredLevel)
retVal += "Empty: {} (offset: 5, width: 1)\r\n".format(self.empty)
retVal += "Overrun: {} (offset: 6, width: 1)\r\n".format(self.overrun)
retVal += "FifoThreshold: {} (offset: 7, width: 1)\r\n".format(self.fifoThreshold)
return retVal
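# Illustrative usage of the register wrappers above (a minimal sketch, not part of
# the original driver; assumes `reg_manager` is an initialised RegisterManager for
# the device):
#   fifo_src = FifoSourceRegister(reg_manager).read()
#   print(fifo_src)            # dump the decoded bit fields
#   if fifo_src.overrun:
#       pass                   # e.g. drain the FIFO or raise the threshold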
class InertialIntGen1ConfigRegister(Register):
def __init__(self, reg_manager: RegisterManager):
Register.__init__(self, reg_manager, 0x30, 1, False)
self.xLowInterruptEnable = 0
self.xHighInterruptEnable = 0
self.yLowInterruptEnable = 0
self.yHighInterruptEnable = 0
self.zLowInterruptEvent = 0
self.zHighInterruptEnable = 0
self.detect6D = 0
self.andOrInterruptEvents = 0
def read(self):
self._manager.read(self)
return self
def getValue(self):
return ((self.xLowInterruptEnable & 0x1) << 0) | ((self.xHighInterruptEnable & 0x1) << 1) | ((self.yLowInterruptEnable & 0x1) << 2) | ((self.yHighInterruptEnable & 0x1) << 3) | ((self.zLowInterruptEvent & 0x1) << 4) | ((self.zHighInterruptEnable & 0x1) << 5) | ((self.detect6D & 0x1) << 6) | ((self.andOrInterruptEvents & 0x1) << 7)
def setValue(self, value: int):
self.xLowInterruptEnable = ((value >> 0) & 0x1)
self.xHighInterruptEnable = ((value >> 1) & 0x1)
self.yLowInterruptEnable = ((value >> 2) & 0x1)
self.yHighInterruptEnable = ((value >> 3) & 0x1)
self.zLowInterruptEvent = ((value >> 4) & 0x1)
self.zHighInterruptEnable = ((value >> 5) & 0x1)
self.detect6D = ((value >> 6) & 0x1)
self.andOrInterruptEvents = ((value >> 7) & 0x1)
def __str__(self):
retVal = ""
retVal += "XLowInterruptEnable: {} (offset: 0, width: 1)\r\n".format(self.xLowInterruptEnable)
retVal += "XHighInterruptEnable: {} (offset: 1, width: 1)\r\n".format(self.xHighInterruptEnable)
retVal += "YLowInterruptEnable: {} (offset: 2, width: 1)\r\n".format(self.yLowInterruptEnable)
retVal += "YHighInterruptEnable: {} (offset: 3, width: 1)\r\n".format(self.yHighInterruptEnable)
retVal += "ZLowInterruptEvent: {} (offset: 4, width: 1)\r\n".format(self.zLowInterruptEvent)
retVal += "ZHighInterruptEnable: {} (offset: 5, width: 1)\r\n".format(self.zHighInterruptEnable)
retVal += "Detect6D: {} (offset: 6, width: 1)\r\n".format(self.detect6D)
retVal += "AndOrInterruptEvents: {} (offset: 7, width: 1)\r\n".format(self.andOrInterruptEvents)
return retVal
class InertialIntGen1StatusRegister(Register):
def __init__(self, reg_manager: RegisterManager):
Register.__init__(self, reg_manager, 0x31, 1, False)
self.xLow = 0
self.xHigh = 0
self.yLow = 0
self.yHigh = 0
self.zLow = 0
self.zHigh = 0
self.intStatus = 0
def read(self):
self._manager.read(self)
return self
def getValue(self):
return ((self.xLow & 0x1) << 0) | ((self.xHigh & 0x1) << 1) | ((self.yLow & 0x1) << 2) | ((self.yHigh & 0x1) << 3) | ((self.zLow & 0x1) << 4) | ((self.zHigh & 0x1) << 5) | ((self.intStatus & 0x1) << 6)
def setValue(self, value: int):
self.xLow = ((value >> 0) & 0x1)
self.xHigh = ((value >> 1) & 0x1)
self.yLow = ((value >> 2) & 0x1)
self.yHigh = ((value >> 3) & 0x1)
self.zLow = ((value >> 4) & 0x1)
self.zHigh = ((value >> 5) & 0x1)
self.intStatus = ((value >> 6) & 0x1)
def __str__(self):
retVal = ""
retVal += "XLow: {} (offset: 0, width: 1)\r\n".format(self.xLow)
retVal += "XHigh: {} (offset: 1, width: 1)\r\n".format(self.xHigh)
retVal += "YLow: {} (offset: 2, width: 1)\r\n".format(self.yLow)
retVal += "YHigh: {} (offset: 3, width: 1)\r\n".format(self.yHigh)
retVal += "ZLow: {} (offset: 4, width: 1)\r\n".format(self.zLow)
retVal += "ZHigh: {} (offset: 5, width: 1)\r\n".format(self.zHigh)
retVal += "IntStatus: {} (offset: 6, width: 1)\r\n".format(self.intStatus)
return retVal
class InertialIntGen1ThresholdRegister(Register):
def __init__(self, reg_manager: RegisterManager):
Register.__init__(self, reg_manager, 0x32, 1, False)
self.value = 0
def read(self):
self._manager.read(self)
return self
def getValue(self):
return ((self.value & 0x7F) << 0)
def setValue(self, value: int):
self.value = ((value >> 0) & 0x7F)
def __str__(self):
retVal = ""
retVal += "Value: {} (offset: 0, width: 7)\r\n".format(self.value)
return retVal
class InertialIntGen1DurationRegister(Register):
def __init__(self, reg_manager: RegisterManager):
Register.__init__(self, reg_manager, 0x33, 1, False)
self.value = 0
def read(self):
self._manager.read(self)
return self
def getValue(self):
return ((self.value & 0x7F) << 0)
def setValue(self, value: int):
self.value = ((value >> 0) & 0x7F)
def __str__(self):
retVal = ""
retVal += "Value: {} (offset: 0, width: 7)\r\n".format(self.value)
return retVal
class InertialIntGen2ConfigRegister(Register):
def __init__(self, reg_manager: RegisterManager):
Register.__init__(self, reg_manager, 0x34, 1, False)
self.xLowInterruptEnable = 0
self.xHighInterruptEnable = 0
self.yLowInterruptEnable = 0
self.yHighInterruptEnable = 0
self.zLowInterruptEvent = 0
self.zHighInterruptEnable = 0
self.detect6D = 0
self.andOrInterruptEvents = 0
def read(self):
self._manager.read(self)
return self
def getValue(self):
return ((self.xLowInterruptEnable & 0x1) << 0) | ((self.xHighInterruptEnable & 0x1) << 1) | ((self.yLowInterruptEnable & 0x1) << 2) | ((self.yHighInterruptEnable & 0x1) << 3) | ((self.zLowInterruptEvent & 0x1) << 4) | ((self.zHighInterruptEnable & 0x1) << 5) | ((self.detect6D & 0x1) << 6) | ((self.andOrInterruptEvents & 0x1) << 7)
def setValue(self, value: int):
self.xLowInterruptEnable = ((value >> 0) & 0x1)
self.xHighInterruptEnable = ((value >> 1) & 0x1)
self.yLowInterruptEnable = ((value >> 2) & 0x1)
self.yHighInterruptEnable = ((value >> 3) & 0x1)
self.zLowInterruptEvent = ((value >> 4) & 0x1)
self.zHighInterruptEnable = ((value >> 5) & 0x1)
self.detect6D = ((value >> 6) & 0x1)
self.andOrInterruptEvents = ((value >> 7) & 0x1)
def __str__(self):
retVal = ""
retVal += "XLowInterruptEnable: {} (offset: 0, width: 1)\r\n".format(self.xLowInterruptEnable)
retVal += "XHighInterruptEnable: {} (offset: 1, width: 1)\r\n".format(self.xHighInterruptEnable)
retVal += "YLowInterruptEnable: {} (offset: 2, width: 1)\r\n".format(self.yLowInterruptEnable)
retVal += "YHighInterruptEnable: {} (offset: 3, width: 1)\r\n".format(self.yHighInterruptEnable)
retVal += "ZLowInterruptEvent: {} (offset: 4, width: 1)\r\n".format(self.zLowInterruptEvent)
retVal += "ZHighInterruptEnable: {} (offset: 5, width: 1)\r\n".format(self.zHighInterruptEnable)
retVal += "Detect6D: {} (offset: 6, width: 1)\r\n".format(self.detect6D)
retVal += "AndOrInterruptEvents: {} (offset: 7, width: 1)\r\n".format(self.andOrInterruptEvents)
return retVal
class InertialIntGen2StatusRegister(Register):
def __init__(self, reg_manager: RegisterManager):
Register.__init__(self, reg_manager, 0x35, 1, False)
self.xLow = 0
self.xHigh = 0
self.yLow = 0
self.yHigh = 0
self.zLow = 0
self.zHigh = 0
self.intStatus = 0
def read(self):
self._manager.read(self)
return self
def getValue(self):
return ((self.xLow & 0x1) << 0) | ((self.xHigh & 0x1) << 1) | ((self.yLow & 0x1) << 2) | ((self.yHigh & 0x1) << 3) | ((self.zLow & 0x1) << 4) | ((self.zHigh & 0x1) << 5) | ((self.intStatus & 0x1) << 6)
def setValue(self, value: int):
self.xLow = ((value >> 0) & 0x1)
self.xHigh = ((value >> 1) & 0x1)
self.yLow = ((value >> 2) & 0x1)
self.yHigh = ((value >> 3) & 0x1)
self.zLow = | |
in executing the test command encountered" + result + ": FAILED", tid)
else:
log_red(
'Verify that "/" is Read Only type file structure: FAILED', tid)
stdin, stdout, stderr = ssh.exec_command('rm /test')
# Testing "/proprietary"
# if "/dev/mapper/proprietary on /proprietary type ext4 (ro,noatime,data=ordered)" in file_structure:
if "/dev/mmcblk2p8 on /proprietary type ext4 (ro,noatime,data=ordered)" in file_structure:
log_green(
'Verify that the file structure has "/proprietary" mounted correctly: PASSED', tid)
else:
log_red(
'Verify that the file structure has "/proprietary" mounted correctly: FAILED', tid)
if "/dev/mmcblk2p8 on /proprietary type ext4 (ro,noatime,data=ordered)" in file_structure:
log_green('Verify that "/proprietary" is NOT encrypted: PASSED', tid)
else:
log_red('Verify that "/proprietary" is NOT encrypted: FAILED', tid)
if "/proprietary type ext4" in file_structure:
log_green(
'Verify that "/proprietary" is ext4 type file structure: PASSED', tid)
else:
log_red(
'Verify that "/proprietary" is ext4 type file structure: FAILED', tid)
stdin, stdout, stderr = ssh.exec_command('touch /proprietary/test')
# check if there is any error in issuing the command
result = stderr.read().decode('UTF-8').replace("\n", "")
if len(result) > 0:
log_green(
'Verify that "/proprietary" is Read Only type file structure: PASSED', tid)
# log_red("error in executing the test command encountered" + result + ": FAILED", tid)
else:
log_red(
'Verify that "/proprietary" is Read Only type file structure: FAILED', tid)
stdin, stdout, stderr = ssh.exec_command(
'rm /proprietary/test')
# Testing "/autonet"
# if "/dev/mapper/autonet on /autonet type ext4 (rw,noatime,data=ordered)" in file_structure:
if "/dev/mmcblk2p10 on /autonet type ext4 (rw,noatime,block_validity,delalloc,barrier,user_xattr,acl)" in file_structure:
log_green(
'Verify that the file structure has "/autonet" mounted correctly: PASSED', tid)
else:
log_red(
'Verify that the file structure has "/autonet" mounted correctly: FAILED', tid)
if "/dev/mmcblk2p10 on /autonet type ext4 (rw,noatime,block_validity,delalloc,barrier,user_xattr,acl)" in file_structure:
log_green('Verify that "/autonet" is NOT encrypted: PASSED', tid)
else:
log_red('Verify that "/autonet" is NOT encrypted: FAILED', tid)
if "/autonet type ext4" in file_structure:
log_green(
'Verify that "/autonet" is ext4 type file structure: PASSED', tid)
else:
log_red(
'Verify that "/autonet" is ext4 type file structure: FAILED', tid)
stdin, stdout, stderr = ssh.exec_command('touch /autonet/test')
# check if there is any error in issuing the command
result = stderr.read().decode('UTF-8').replace("\n", "")
if len(result) > 0:
log_red(
'Verify that "/autonet" is Read/Write type file structure: FAILED', tid)
else:
log_green(
'Verify that "/autonet" is Read/Write type file structure: PASSED', tid)
stdin, stdout, stderr = ssh.exec_command('rm /autonet/test')
# log_red("error in executing the test command encountered" + result + ": FAILED", tid)
# Testing "/user"
if "/dev/mmcblk2p15 on /user type ext4 (rw,noatime,data=ordered)" in file_structure:
log_green(
'Verify that the file structure has "/user" mounted correctly: PASSED', tid)
else:
log_red(
'Verify that the file structure has "/user" mounted correctly: FAILED', tid)
if "/dev/mmcblk2p15 on /user type ext4 (rw,noatime,data=ordered)" in file_structure:
log_green('Verify that "/user" is NOT encrypted: PASSED', tid)
else:
log_red('Verify that "/user" is NOT encrypted: FAILED', tid)
if "/user type ext4" in file_structure:
log_green(
'Verify that "/user" is ext4 type file structure: PASSED', tid)
else:
log_red(
'Verify that "/user" is ext4 type file structure: FAILED', tid)
stdin, stdout, stderr = ssh.exec_command('touch /user/test')
# check if there is any error in issuing the command
result = stderr.read().decode('UTF-8').replace("\n", "")
if len(result) > 0:
log_red(
'Verify that "/user" is Read/Write type file structure: FAILED', tid)
else:
log_green(
'Verify that "/user" is Read/Write type file structure: PASSED', tid)
stdin, stdout, stderr = ssh.exec_command('rm /user/test')
# log_red("error in executing the test command encountered" + result + ": FAILED", tid)
# Testing "/tmp"
# if "tmpfs on /tmp type tmpfs (rw,noatime)" in file_structure:
if "tmpfs on /tmp type tmpfs (rw,relatime)" in file_structure:
log_green(
'Verify that the file structure has "/tmp" mounted correctly: PASSED', tid)
else:
log_red(
'Verify that the file structure has "/tmp" mounted correctly: FAILED', tid)
if "tmpfs on /tmp" in file_structure:
log_green('Verify that "/tmp" is NOT encrypted: PASSED', tid)
else:
log_red('Verify that "/tmp" is encrypted: FAILED', tid)
if "/tmp type tmpfs" in file_structure:
log_green(
'Verify that "/tmp" is tmpfs type file structure: PASSED', tid)
else:
log_red(
'Verify that "/tmp" is tmpfs type file structure: FAILED', tid)
stdin, stdout, stderr = ssh.exec_command('touch /tmp/test')
# check if there is any error in issuing the command
result = stderr.read().decode('UTF-8').replace("\n", "")
if len(result) > 0:
log_red(
'Verify that "/tmp" is Read/Write type file structure: FAILED', tid)
else:
log_green(
'Verify that "/tmp" is Read/Write type file structure: PASSED', tid)
stdin, stdout, stderr = ssh.exec_command('rm /tmp/test')
except Exception as e:
log_red("Operation error:" + str(e) + ": FAILED", tid)
@unittest.skipIf(encrypted == 0, "The build has NON encrypted file structure")
def test_REFP_017_file_system_structure_en(self):
'''Verify the design of file system structure'''
tid = 'REFP_017'
print('[Test Case ID ]: %s' % tid)
print('[Test Case Name ]: %s' % inspect.stack()[0].function)
print('[Title ]: Verify the design of file system structure')
print('[Product Requirement ]: EINST-021, EINST-004')
print('[Development Task ]: CONLAREINS-67, CONLAREINS-34, CONLAREINS-253')
print('[Test Automation Task ]: CONLAREINS-183, CONLAREINS-283')
log_blue('[================================================================================================================]')
# TBA These test cases need to be modified for dm-verity and non-dm-verity type builds, currently they are set for non
ssh = self.ssh # handle
try:
stdin, stdout, stderr = ssh.exec_command('mount')
# check if there is any error in issuing the command
result = stderr.read().decode('UTF-8').replace("\n", "")
if len(result) > 0:
log_red(
"error in executing the test command encountered" + result + ": FAILED", tid)
file_structure = stdout.read().decode('UTF-8')
# print(file_structure)
# actual output displayed for the encrypted file system
'''
root@einstein-049F041758(5.2.7):~$ mount
/dev/dm-0 on / type ext4 (ro,noatime,data=ordered)
devtmpfs on /dev type devtmpfs (rw,relatime,size=867096k,nr_inodes=216774,mode=755)
sysfs on /sys type sysfs (rw,nosuid,nodev,noexec,relatime)
proc on /proc type proc (rw,relatime)
securityfs on /sys/kernel/security type securityfs (rw,nosuid,nodev,noexec,relatime)
smackfs on /sys/fs/smackfs type smackfs (rw,nosuid,nodev,noexec,relatime)
tmpfs on /dev/shm type tmpfs (rw,nosuid,nodev)
devpts on /dev/pts type devpts (rw,relatime,gid=5,mode=620,ptmxmode=000)
tmpfs on /run type tmpfs (rw,nosuid,nodev,mode=755)
tmpfs on /sys/fs/cgroup type tmpfs (ro,nosuid,nodev,noexec,mode=755)
cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd)
cgroup on /sys/fs/cgroup/cpu,cpuacct type cgroup (rw,nosuid,nodev,noexec,relatime,cpu,cpuacct)
cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,blkio)
cgroup on /sys/fs/cgroup/net_cls type cgroup (rw,nosuid,nodev,noexec,relatime,net_cls)
cgroup on /sys/fs/cgroup/debug type cgroup (rw,nosuid,nodev,noexec,relatime,debug)
cgroup on /sys/fs/cgroup/memory type cgroup (rw,nosuid,nodev,noexec,relatime,memory)
cgroup on /sys/fs/cgroup/devices type cgroup (rw,nosuid,nodev,noexec,relatime,devices)
cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,freezer,release_agent=/legato/systems/current/bin/_appStopClient)
tmpfs on /etc/machine-id type tmpfs (ro,mode=755)
debugfs on /sys/kernel/debug type debugfs (rw,relatime)
tmpfs on /tmp type tmpfs (rw,relatime)
tmpfs on /var/volatile type tmpfs (rw,relatime)
smackfs on /smack type smackfs (rw,relatime)
tmpfs on /var/spool type tmpfs (rw,relatime)
tmpfs on /var/cache type tmpfs (rw,relatime)
tmpfs on /var/lib type tmpfs (rw,relatime)
/dev/mapper/autonet on /autonet type ext4 (rw,noatime,block_validity,delalloc,barrier,user_xattr,acl)
/dev/mapper/proprietary on /proprietary type ext4 (ro,noatime,block_validity,delalloc,barrier,user_xattr,acl)
/dev/mapper/user on /user type ext4 (rw,noatime,data=ordered)
tmpfs on /etc/ld.so.cache type tmpfs (rw,relatime)
tmpfs on /etc/hosts type tmpfs (rw,relatime)
/dev/mapper/autonet on /legato type ext4 (rw,noatime,block_validity,delalloc,barrier,user_xattr,acl)
smack on /legato/smack type smackfs (rw,relatime)
smack on /autonet/legato/flash/legato/smack type smackfs (rw,relatime)
/dev/mapper/autonet on /legato/systems/current type ext4 (rw,noatime,block_validity,delalloc,barrier,user_xattr,acl)
/dev/mapper/autonet on /autonet/legato/flash/legato/systems/current type ext4 (rw,noatime,block_validity,delalloc,barrier,user_xattr,acl)
'''
# Testing "/"
if "/dev/dm-0 on / type ext4 (ro,noatime,data=ordered)" in file_structure:
log_green(
'Verify that the file structure has "/dev/dm-0" mounted correctly: PASSED', tid)
else:
log_red(
'Verify that the file structure has "/dev/dm-0" mounted correctly: FAILED', tid)
if "/dev/dm-0" in file_structure:
log_green(
'Verify that file system "/" is NOT encrypted: PASSED', tid)
else:
log_red('Verify that file system "/" is NOT encrypted: FAILED', tid)
if "/dev/dm-0 on / type ext4" in file_structure:
log_green(
'Verify that "/" is ext4 type file structure: PASSED', tid)
else:
log_red('Verify that "/" is ext4 type file structure: FAILED', tid)
if "/dev/dm-0 on / type ext4 (ro,noatime,data=ordered)" in file_structure:
log_green(
'Verify that "/" is read-only file structure: PASSED', tid)
else:
log_red('Verify that "/" is read-only file structure: FAILED', tid)
stdin, stdout, stderr = ssh.exec_command('touch /test')
# check if there is any error in issuing the command
result = stderr.read().decode('UTF-8').replace("\n", "")
if len(result) > 0:
log_green(
'Verify that "/" is Read Only type file structure: PASSED', tid)
# log_red("error in executing the test command encountered" + result + ": FAILED", tid)
else:
log_red(
'Verify that "/" is Read Only type file structure: FAILED', tid)
stdin, stdout, stderr = ssh.exec_command('rm /test')
# Testing "/proprietary"
# if "/dev/mapper/proprietary on /proprietary type ext4 (ro,noatime,data=ordered)" in file_structure:
if "/dev/mapper/proprietary on /proprietary type ext4 (ro,noatime,block_validity,delalloc,barrier,user_xattr,acl)" in file_structure:
log_green(
'Verify that the file structure has "/proprietary" | |
#!/usr/bin/env python
__Author__ = "<NAME>"
# ROS import
import rospy
import rospkg
from geometry_msgs.msg import Twist, Vector3Stamped, Pose, PoseWithCovarianceStamped
from sensor_msgs.msg import Imu
#from cv_bridge import CvBridge, CvBridgeError
from std_msgs.msg import Empty
# Required Libraries
import time, csv
import random
import numpy as np
import skimage
from collections import deque # Ordered collection with ends
from gym import wrappers
import tensorflow as tf
from lib.exp_replay import Memory
from lib.get_next_obs import nextObs
from lib.sdsnet import DDDQNNet
from lib.memory import SumTree, Memory
import env_sds
from ast import literal_eval
import warnings
import gym
warnings.filterwarnings('ignore')
def stack_ob(bined_ob, obs=None):
# Preprocess frame
bOB = bined_ob
if obs is None:
# Clear our stacked_frames
obs = deque([bOB for _ in range(stack_size)], maxlen=4)
# Stack the frames
stacked_state = np.stack(obs, axis=1)
else:
# Append frame to deque, automatically removes the oldest frame
obs.append(bOB)
# Build the stacked state (first dimension specifies different frames)
stacked_state = np.stack(obs, axis=1)
stacked_s = np.expand_dims(stacked_state, 0)
return stacked_s, obs
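# Illustrative usage of stack_ob (a sketch only; mirrors how it is called from
# make_batch() and main() below, assuming `env` and an action index `act` exist):
#   state = env.reset()
#   stacked_state, obs = stack_ob(state)            # first call: deque filled with copies of the first observation
#   next_state, reward, done, _ = env.step(act)
#   stacked_state, obs = stack_ob(next_state, obs)  # later calls: newest observation pushed in, oldest dropped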
def predict_action(sess, explore_start, explore_stop, decay_rate, decay_step, s_state):
## EPSILON GREEDY STRATEGY
# Choose action a from state s using epsilon greedy.
## First we randomize a number
exp_exp_tradeoff = np.random.rand()
# Here we'll use an improved version of our epsilon greedy strategy used in Q-learning notebook
explore_probability = explore_stop + (explore_start - explore_stop) * np.exp(-decay_rate * decay_step)
if (explore_probability > exp_exp_tradeoff):
# Make a random action (exploration)
action = random.choice(possible_actions)
else:
# Get action from Q-network (exploitation)
# Estimate the Qs values state
Qs = sess.run(DQNetwork.output, feed_dict={DQNetwork.inputs_: np.expand_dims(s_state, 0)})
# Take the biggest Q value (= the best action)
choice = np.argmax(Qs)
action = possible_actions[int(choice)]
return action, explore_probability
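# Worked example of the epsilon decay used above (illustrative numbers only, not
# the values loaded from the ROS parameter server):
# with explore_start=1.0, explore_stop=0.01, decay_rate=1e-4:
#   decay_step = 0      -> explore_probability ~= 1.00  (almost always explore)
#   decay_step = 10000  -> explore_probability ~= 0.01 + 0.99*exp(-1) ~= 0.37
#   decay_step = 50000  -> explore_probability ~= 0.01 + 0.99*exp(-5) ~= 0.017 (mostly exploit)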
def make_batch():
for i in range(4*batch_size):
# If it's the first step
# First we need a state
state = env.reset()
stacked_state, obs = stack_ob(state)
step = 0
while step <= nsteps:
# Random action
action = random.choice(possible_actions)
act = possible_actions.index(action)
# Get the rewards
next_state, reward, done, drone_shot = env.step(act)
if len(next_state) > 4:
step += 1
next_stacked_state, next_obs = stack_ob(next_state, obs)
memory.store((stacked_state, action, reward, next_stacked_state, done))
# If we're dead
if done:
step = nsteps + 1
else:
stacked_state = next_stacked_state
obs = next_obs
else:
step = nsteps + 1
# This function helps us to copy one set of variables to another
# In our case we use it when we want to copy the parameters of DQN to Target_network
# Thanks to the very good implementation by <NAME>: https://github.com/awjuliani
def update_target_graph():
# Get the parameters of our DQNNetwork
from_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "DQNetwork")
# Get the parameters of our Target_network
to_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "TargetNetwork")
op_holder = []
# Update our target_network parameters with DQNNetwork parameters
for from_var, to_var in zip(from_vars, to_vars):
op_holder.append(to_var.assign(from_var))
return op_holder
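# Typical usage (as done in main() below): build the assignment ops once, then run
# them whenever the target network should be hard-copied from the online DQN
# (here, every `max_tau` learning steps):
#   update_target = update_target_graph()
#   sess.run(update_target)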
def main():
saver = tf.train.Saver()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
if training:
if not resume:
make_batch()
writer = tf.summary.FileWriter(outdir + "tensorboard/")
## Losses
tf.summary.scalar("Loss", DQNetwork.loss)
write_op = tf.summary.merge_all()
epoch = 1
loss = None
sumsEpsR = []
avgsEpsR = []
total_rewards = 0
average_reward = []
maxRRec, avgR = None, None
data_file = []
if resume:
# Load the model
saver.restore(sess, outdir + "models/sds" + str(resume_ep) + ".ckpt")
data_file.append(["Epoch", "Total reward", "Mean Reward of that batch", "Max reward for a batch so far",
"Loss", "Touch_down"])
# Initialize the decay rate (that will use to reduce epsilon)
decay_step = 0
# Set tau = 0
tau = 0
# How many times we achieved the goal
drone_shot = 0
# Update the parameters of our TargetNetwork with DQN_weights
update_target = update_target_graph()
sess.run(update_target)
for episode in range(total_episodes):
step = 0
# Initialize the rewards of the episode
episode_rewards = []
# Make a new episode and observe the first state
state = env.reset()
stacked_state, obs = stack_ob(state)
while step <= nsteps:
# Predict the action to take and take it
action, explore_probability = predict_action(sess, explore_start, explore_stop, decay_rate, decay_step,
stacked_state)
act = action.index(1)
# Get the rewards
next_state, reward, done, drone_shot = env.step(act)
if len(next_state) > 4:
step += 1
# Increase the C step
tau += 1
# Increase decay_step
decay_step += 1
# print("reward", reward, "done", done, "state", next_state, "goal", drone_shot,
# "action", act, "exp_prob", explore_probability)
next_stacked_state, next_obs = stack_ob(next_state, obs)
memory.store((stacked_state, action, reward, next_stacked_state, done))
# Add the reward to total reward
episode_rewards.append(reward)
# Get the total reward of the episode
total_reward = np.sum(episode_rewards)
# If the game is finished
if done:
sumsEpsR.append(total_reward)
avgsEpsR.append(total_reward / nsteps)
print('Episode: {}'.format(episode),
'Total reward: {}'.format(total_reward),
'Explore P: {:.4f}'.format(explore_probability),
'Target: {}'.format(next_state),
'step: {}'. format(step),
'Drone Shot: {}'.format(drone_shot))
step = nsteps + 1
else:
stacked_state = next_stacked_state
obs = next_obs
else:
step = nsteps + 1
### LEARNING PART
# Obtain random mini-batch from memory
tree_idx, batch, ISWeights_mb = memory.sample(batch_size)
stacked_states_mb = np.array([each[0][0] for each in batch], ndmin=3)
actions_mb = np.array([each[0][1] for each in batch])
rewards_mb = np.array([each[0][2] for each in batch])
next_stacked_states_mb = np.array([each[0][3] for each in batch], ndmin=3)
dones_mb = np.array([each[0][4] for each in batch])
target_Qs_batch = []
# Get Q values for next_state
q_next_state = sess.run(DQNetwork.output, feed_dict={DQNetwork.inputs_: next_stacked_states_mb})
# Calculate Q-target for all actions in that state
q_target_next_state = sess.run(TargetNetwork.output, feed_dict={TargetNetwork.inputs_: next_stacked_states_mb})
# Set Q_target = r if the episode ends at s+1, otherwise set Q_target = r + gamma*maxQ(s', a')
for i in range(0, len(batch)):
terminal = dones_mb[i]
# We got a'
action = np.argmax(q_next_state[i])
# If we are in a terminal state, only equals reward
if terminal:
target_Qs_batch.append(rewards_mb[i])
else:
# Take the Qtarget for action a'
target = rewards_mb[i] + gamma * q_target_next_state[i][action]
target_Qs_batch.append(target)
targets_mb = np.array([each for each in target_Qs_batch])
_, loss, absolute_errors = sess.run([DQNetwork.optimizer, DQNetwork.loss, DQNetwork.absolute_errors],
feed_dict={DQNetwork.inputs_: stacked_states_mb,
DQNetwork.target_Q: targets_mb,
DQNetwork.actions_: actions_mb,
DQNetwork.ISWeights_: ISWeights_mb})
# Update priority
memory.batch_update(tree_idx, absolute_errors)
# Write TF Summaries
summary = sess.run(write_op, feed_dict={DQNetwork.inputs_: stacked_states_mb,
DQNetwork.target_Q: targets_mb,
DQNetwork.actions_: actions_mb,
DQNetwork.ISWeights_: ISWeights_mb})
writer.add_summary(summary, episode)
writer.flush()
if tau > max_tau:
# Update the parameters of our TargetNetwork with DQN_weights
update_target = update_target_graph()
sess.run(update_target)
tau = 0
print("Model updated")
# Save model every 100 episodes
if episode % 100 == 0 and episode > 0:
save_path = saver.save(sess, outdir + "models/sds" + str(episode + resume_ep) + ".ckpt")
maxRRec = np.amax(sumsEpsR)
data_file.append([epoch, sumsEpsR[-1], avgsEpsR[-1], maxRRec, loss, drone_shot])
with open(outdir + 'sdsLog.csv', 'w') as fout:
csvwriter = csv.writer(fout)
csvwriter.writerows(data_file)
fout.close()
epoch += 1
print("Model Saved")
else:
# Load the model
saver.restore(sess, outdir + "models/sds" + str(resume_ep) + ".ckpt")
tre = 0
for j in range(batch_size):
tstp = 0
tact = 0
t_reward = []
sumsR = []
avgsR = []
# Make a new episode and observe the first state
tst = env.reset()
tstkd_st, tobs = stack_ob(tst)
g = False
while tstp <= nsteps:
if tre >= 2:
tact = 0
g = True
if not g:
# Take the biggest Q value (= the best action)
tQs = sess.run(DQNetwork.output, feed_dict={DQNetwork.inputs_: np.expand_dims(tstkd_st, 0)})
# Take the biggest Q value (= the best action)
tchc = np.argmax(tQs)
taction = possible_actions[int(tchc)]
tact = taction.index(1)
# Get the rewards
tnxt_st, tre, tdon, tstp_c_gl = env.step(tact)
if len(tnxt_st) > 2:
tstp += 1
tnxt_stkd_st, tnxt_tobs = stack_ob(tnxt_st, tobs)
t_reward.append(tre)
# print("reward", tre, "done", tdon, "state", tnxt_st, "goal", tstp_c_gl, "action", tact)
if tdon:
# sumsR.append(t_reward)
# avgsR.append(t_reward / nsteps)
print('Episode: {}'.format(j),
'Target: {}'.format(tnxt_st),
'step: {}'.format(tstp),
'Drone Shot: {}'.format(tstp_c_gl))
tstp = nsteps + 1
else:
print("reward= ", tre)
print("Target= ", tnxt_st)
tobs = tnxt_tobs
else:
tstp = nsteps + 1
print("Score: ", np.sum(t_reward))
if __name__ == '__main__':
### ROS HYPERPARAMETERS
# Node Initialization
rospy.init_node('Selfie-Drone-Stick')
### ENVIRONMENT HYPERPARAMETERS
# Create the Environmet
env = gym.make('sds-v0')
#env.seed(1) # reproducible
env = env.unwrapped # remove all the wrappers to access the underlying environment dynamics
rospy.loginfo("Gym Environment Initialized")
### TRAINING HYPERPARAMETERS
# Load training parameters from the yaml file
training = rospy.get_param("/training") # Training or testing
resume = rospy.get_param("/resume") # Resume from previous checkpoint
resume_ep = rospy.get_param("/resume_ep")
OUTPUT_GRAPH = rospy.get_param("/output_graph")
gamma = rospy.get_param("/gamma")
explore_start = rospy.get_param("/explore_start")
explore_stop = rospy.get_param("/explore_stop")
decay_rate = rospy.get_param("/d_rate")
learning_rate = rospy.get_param("/learning_rate")
total_episodes = rospy.get_param("/episodesTH")
batch_size = rospy.get_param("/batch_size")
nsteps = rospy.get_param("/stepsTH")
memory_size = rospy.get_param("/memory_size")
max_tau = rospy.get_param("/tauTH")
pretrain_length = batch_size
stack_size = rospy.get_param("/stack_size") # 4
ob_size = rospy.get_param("/ob_size") # 8
stacked_state_size | |
'''
Copyright 2021 Flexera Software LLC
See LICENSE.TXT for full license text
SPDX-License-Identifier: MIT
Author : sgeary
Created On : Tue Apr 27 2021
File : SPDX_license_mappings.py
'''
LICENSEMAPPINGS = {}
# Manual Mappings
LICENSEMAPPINGS["GPL-1.0"]="GPL-1.0-only"
LICENSEMAPPINGS["GPL-2.0"]="GPL-2.0-only"
LICENSEMAPPINGS["GPL-3.0"]="GPL-3.0-only"
LICENSEMAPPINGS["GPL-1.0+"]="GPL-1.0-or-later"
LICENSEMAPPINGS["GPL-2.0+"]="GPL-2.0-or-later"
LICENSEMAPPINGS["GPL-3.0+"]="GPL-3.0-or-later"
LICENSEMAPPINGS["LGPL-2.0"]="LGPL-2.0-only"
LICENSEMAPPINGS["LGPL-2.1"]="LGPL-2.1-only"
LICENSEMAPPINGS["LGPL-3.0"]="LGPL-3.0-only"
LICENSEMAPPINGS["LGPL-2.0+"]="LGPL-2.0-or-later"
LICENSEMAPPINGS["LGPL-2.1+"]="LGPL-2.1-or-later"
LICENSEMAPPINGS["LGPL-3.0+"]="LGPL-3.0-or-later"
LICENSEMAPPINGS["Creative Commons Attribution 2.5 Generic"]="CC-BY-2.5"
# SPDX Direct Mappings
LICENSEMAPPINGS["0BSD"]="0BSD"
LICENSEMAPPINGS["AAL"]="AAL"
LICENSEMAPPINGS["Abstyles"]="Abstyles"
LICENSEMAPPINGS["Adobe-2006"]="Adobe-2006"
LICENSEMAPPINGS["Adobe-Glyph"]="Adobe-Glyph"
LICENSEMAPPINGS["ADSL"]="ADSL"
LICENSEMAPPINGS["AFL-1.1"]="AFL-1.1"
LICENSEMAPPINGS["AFL-1.2"]="AFL-1.2"
LICENSEMAPPINGS["AFL-2.0"]="AFL-2.0"
LICENSEMAPPINGS["AFL-2.1"]="AFL-2.1"
LICENSEMAPPINGS["AFL-3.0"]="AFL-3.0"
LICENSEMAPPINGS["Afmparse"]="Afmparse"
LICENSEMAPPINGS["AGPL-1.0-only"]="AGPL-1.0-only"
LICENSEMAPPINGS["AGPL-1.0-or-later"]="AGPL-1.0-or-later"
LICENSEMAPPINGS["AGPL-3.0-only"]="AGPL-3.0-only"
LICENSEMAPPINGS["AGPL-3.0-or-later"]="AGPL-3.0-or-later"
LICENSEMAPPINGS["Aladdin"]="Aladdin"
LICENSEMAPPINGS["AMDPLPA"]="AMDPLPA"
LICENSEMAPPINGS["AML"]="AML"
LICENSEMAPPINGS["AMPAS"]="AMPAS"
LICENSEMAPPINGS["ANTLR-PD"]="ANTLR-PD"
LICENSEMAPPINGS["ANTLR-PD-fallback"]="ANTLR-PD-fallback"
LICENSEMAPPINGS["Apache-1.0"]="Apache-1.0"
LICENSEMAPPINGS["Apache-1.1"]="Apache-1.1"
LICENSEMAPPINGS["Apache-2.0"]="Apache-2.0"
LICENSEMAPPINGS["APAFML"]="APAFML"
LICENSEMAPPINGS["APL-1.0"]="APL-1.0"
LICENSEMAPPINGS["APSL-1.0"]="APSL-1.0"
LICENSEMAPPINGS["APSL-1.1"]="APSL-1.1"
LICENSEMAPPINGS["APSL-1.2"]="APSL-1.2"
LICENSEMAPPINGS["APSL-2.0"]="APSL-2.0"
LICENSEMAPPINGS["Artistic-1.0"]="Artistic-1.0"
LICENSEMAPPINGS["Artistic-1.0-cl8"]="Artistic-1.0-cl8"
LICENSEMAPPINGS["Artistic-1.0-Perl"]="Artistic-1.0-Perl"
LICENSEMAPPINGS["Artistic-2.0"]="Artistic-2.0"
LICENSEMAPPINGS["Bahyph"]="Bahyph"
LICENSEMAPPINGS["Barr"]="Barr"
LICENSEMAPPINGS["Beerware"]="Beerware"
LICENSEMAPPINGS["BitTorrent-1.0"]="BitTorrent-1.0"
LICENSEMAPPINGS["BitTorrent-1.1"]="BitTorrent-1.1"
LICENSEMAPPINGS["blessing"]="blessing"
LICENSEMAPPINGS["BlueOak-1.0.0"]="BlueOak-1.0.0"
LICENSEMAPPINGS["Borceux"]="Borceux"
LICENSEMAPPINGS["BSD-1-Clause"]="BSD-1-Clause"
LICENSEMAPPINGS["BSD-2-Clause"]="BSD-2-Clause"
LICENSEMAPPINGS["BSD-2-Clause-Patent"]="BSD-2-Clause-Patent"
LICENSEMAPPINGS["BSD-2-Clause-Views"]="BSD-2-Clause-Views"
LICENSEMAPPINGS["BSD-3-Clause"]="BSD-3-Clause"
LICENSEMAPPINGS["BSD-3-Clause-Attribution"]="BSD-3-Clause-Attribution"
LICENSEMAPPINGS["BSD-3-Clause-Clear"]="BSD-3-Clause-Clear"
LICENSEMAPPINGS["BSD-3-Clause-LBNL"]="BSD-3-Clause-LBNL"
LICENSEMAPPINGS["BSD-3-Clause-Modification"]="BSD-3-Clause-Modification"
LICENSEMAPPINGS["BSD-3-Clause-No-Military-License"]="BSD-3-Clause-No-Military-License"
LICENSEMAPPINGS["BSD-3-Clause-No-Nuclear-License"]="BSD-3-Clause-No-Nuclear-License"
LICENSEMAPPINGS["BSD-3-Clause-No-Nuclear-License-2014"]="BSD-3-Clause-No-Nuclear-License-2014"
LICENSEMAPPINGS["BSD-3-Clause-No-Nuclear-Warranty"]="BSD-3-Clause-No-Nuclear-Warranty"
LICENSEMAPPINGS["BSD-3-Clause-Open-MPI"]="BSD-3-Clause-Open-MPI"
LICENSEMAPPINGS["BSD-4-Clause"]="BSD-4-Clause"
LICENSEMAPPINGS["BSD-4-Clause-Shortened"]="BSD-4-Clause-Shortened"
LICENSEMAPPINGS["BSD-4-Clause-UC"]="BSD-4-Clause-UC"
LICENSEMAPPINGS["BSD-Protection"]="BSD-Protection"
LICENSEMAPPINGS["BSD-Source-Code"]="BSD-Source-Code"
LICENSEMAPPINGS["BSL-1.0"]="BSL-1.0"
LICENSEMAPPINGS["BUSL-1.1"]="BUSL-1.1"
LICENSEMAPPINGS["bzip2-1.0.5"]="bzip2-1.0.5"
LICENSEMAPPINGS["bzip2-1.0.6"]="bzip2-1.0.6"
LICENSEMAPPINGS["C-UDA-1.0"]="C-UDA-1.0"
LICENSEMAPPINGS["CAL-1.0"]="CAL-1.0"
LICENSEMAPPINGS["CAL-1.0-Combined-Work-Exception"]="CAL-1.0-Combined-Work-Exception"
LICENSEMAPPINGS["Caldera"]="Caldera"
LICENSEMAPPINGS["CATOSL-1.1"]="CATOSL-1.1"
LICENSEMAPPINGS["CC-BY-1.0"]="CC-BY-1.0"
LICENSEMAPPINGS["CC-BY-2.0"]="CC-BY-2.0"
LICENSEMAPPINGS["CC-BY-2.5"]="CC-BY-2.5"
LICENSEMAPPINGS["CC-BY-3.0"]="CC-BY-3.0"
LICENSEMAPPINGS["CC-BY-3.0-AT"]="CC-BY-3.0-AT"
LICENSEMAPPINGS["CC-BY-3.0-US"]="CC-BY-3.0-US"
LICENSEMAPPINGS["CC-BY-4.0"]="CC-BY-4.0"
LICENSEMAPPINGS["CC-BY-NC-1.0"]="CC-BY-NC-1.0"
LICENSEMAPPINGS["CC-BY-NC-2.0"]="CC-BY-NC-2.0"
LICENSEMAPPINGS["CC-BY-NC-2.5"]="CC-BY-NC-2.5"
LICENSEMAPPINGS["CC-BY-NC-3.0"]="CC-BY-NC-3.0"
LICENSEMAPPINGS["CC-BY-NC-4.0"]="CC-BY-NC-4.0"
LICENSEMAPPINGS["CC-BY-NC-ND-1.0"]="CC-BY-NC-ND-1.0"
LICENSEMAPPINGS["CC-BY-NC-ND-2.0"]="CC-BY-NC-ND-2.0"
LICENSEMAPPINGS["CC-BY-NC-ND-2.5"]="CC-BY-NC-ND-2.5"
LICENSEMAPPINGS["CC-BY-NC-ND-3.0"]="CC-BY-NC-ND-3.0"
LICENSEMAPPINGS["CC-BY-NC-ND-3.0-IGO"]="CC-BY-NC-ND-3.0-IGO"
LICENSEMAPPINGS["CC-BY-NC-ND-4.0"]="CC-BY-NC-ND-4.0"
LICENSEMAPPINGS["CC-BY-NC-SA-1.0"]="CC-BY-NC-SA-1.0"
LICENSEMAPPINGS["CC-BY-NC-SA-2.0"]="CC-BY-NC-SA-2.0"
LICENSEMAPPINGS["CC-BY-NC-SA-2.5"]="CC-BY-NC-SA-2.5"
LICENSEMAPPINGS["CC-BY-NC-SA-3.0"]="CC-BY-NC-SA-3.0"
LICENSEMAPPINGS["CC-BY-NC-SA-4.0"]="CC-BY-NC-SA-4.0"
LICENSEMAPPINGS["CC-BY-ND-1.0"]="CC-BY-ND-1.0"
LICENSEMAPPINGS["CC-BY-ND-2.0"]="CC-BY-ND-2.0"
LICENSEMAPPINGS["CC-BY-ND-2.5"]="CC-BY-ND-2.5"
LICENSEMAPPINGS["CC-BY-ND-3.0"]="CC-BY-ND-3.0"
LICENSEMAPPINGS["CC-BY-ND-4.0"]="CC-BY-ND-4.0"
LICENSEMAPPINGS["CC-BY-SA-1.0"]="CC-BY-SA-1.0"
LICENSEMAPPINGS["CC-BY-SA-2.0"]="CC-BY-SA-2.0"
LICENSEMAPPINGS["CC-BY-SA-2.0-UK"]="CC-BY-SA-2.0-UK"
LICENSEMAPPINGS["CC-BY-SA-2.1-JP"]="CC-BY-SA-2.1-JP"
LICENSEMAPPINGS["CC-BY-SA-2.5"]="CC-BY-SA-2.5"
LICENSEMAPPINGS["CC-BY-SA-3.0"]="CC-BY-SA-3.0"
LICENSEMAPPINGS["CC-BY-SA-3.0-AT"]="CC-BY-SA-3.0-AT"
LICENSEMAPPINGS["CC-BY-SA-4.0"]="CC-BY-SA-4.0"
LICENSEMAPPINGS["CC-PDDC"]="CC-PDDC"
LICENSEMAPPINGS["CC0-1.0"]="CC0-1.0"
LICENSEMAPPINGS["CDDL-1.0"]="CDDL-1.0"
LICENSEMAPPINGS["CDDL-1.1"]="CDDL-1.1"
LICENSEMAPPINGS["CDL-1.0"]="CDL-1.0"
LICENSEMAPPINGS["CDLA-Permissive-1.0"]="CDLA-Permissive-1.0"
LICENSEMAPPINGS["CDLA-Sharing-1.0"]="CDLA-Sharing-1.0"
LICENSEMAPPINGS["CECILL-1.0"]="CECILL-1.0"
LICENSEMAPPINGS["CECILL-1.1"]="CECILL-1.1"
LICENSEMAPPINGS["CECILL-2.0"]="CECILL-2.0"
LICENSEMAPPINGS["CECILL-2.1"]="CECILL-2.1"
LICENSEMAPPINGS["CECILL-B"]="CECILL-B"
LICENSEMAPPINGS["CECILL-C"]="CECILL-C"
LICENSEMAPPINGS["CERN-OHL-1.1"]="CERN-OHL-1.1"
LICENSEMAPPINGS["CERN-OHL-1.2"]="CERN-OHL-1.2"
LICENSEMAPPINGS["CERN-OHL-P-2.0"]="CERN-OHL-P-2.0"
LICENSEMAPPINGS["CERN-OHL-S-2.0"]="CERN-OHL-S-2.0"
LICENSEMAPPINGS["CERN-OHL-W-2.0"]="CERN-OHL-W-2.0"
LICENSEMAPPINGS["ClArtistic"]="ClArtistic"
LICENSEMAPPINGS["CNRI-Jython"]="CNRI-Jython"
LICENSEMAPPINGS["CNRI-Python"]="CNRI-Python"
LICENSEMAPPINGS["CNRI-Python-GPL-Compatible"]="CNRI-Python-GPL-Compatible"
LICENSEMAPPINGS["Condor-1.1"]="Condor-1.1"
LICENSEMAPPINGS["copyleft-next-0.3.0"]="copyleft-next-0.3.0"
LICENSEMAPPINGS["copyleft-next-0.3.1"]="copyleft-next-0.3.1"
LICENSEMAPPINGS["CPAL-1.0"]="CPAL-1.0"
LICENSEMAPPINGS["CPL-1.0"]="CPL-1.0"
LICENSEMAPPINGS["CPOL-1.02"]="CPOL-1.02"
LICENSEMAPPINGS["Crossword"]="Crossword"
LICENSEMAPPINGS["CrystalStacker"]="CrystalStacker"
LICENSEMAPPINGS["CUA-OPL-1.0"]="CUA-OPL-1.0"
LICENSEMAPPINGS["Cube"]="Cube"
LICENSEMAPPINGS["curl"]="curl"
LICENSEMAPPINGS["D-FSL-1.0"]="D-FSL-1.0"
LICENSEMAPPINGS["diffmark"]="diffmark"
LICENSEMAPPINGS["DOC"]="DOC"
LICENSEMAPPINGS["Dotseqn"]="Dotseqn"
LICENSEMAPPINGS["DRL-1.0"]="DRL-1.0"
LICENSEMAPPINGS["DSDP"]="DSDP"
LICENSEMAPPINGS["dvipdfm"]="dvipdfm"
LICENSEMAPPINGS["ECL-1.0"]="ECL-1.0"
LICENSEMAPPINGS["ECL-2.0"]="ECL-2.0"
LICENSEMAPPINGS["EFL-1.0"]="EFL-1.0"
LICENSEMAPPINGS["EFL-2.0"]="EFL-2.0"
LICENSEMAPPINGS["eGenix"]="eGenix"
LICENSEMAPPINGS["Entessa"]="Entessa"
LICENSEMAPPINGS["EPICS"]="EPICS"
LICENSEMAPPINGS["EPL-1.0"]="EPL-1.0"
LICENSEMAPPINGS["EPL-2.0"]="EPL-2.0"
LICENSEMAPPINGS["ErlPL-1.1"]="ErlPL-1.1"
LICENSEMAPPINGS["etalab-2.0"]="etalab-2.0"
LICENSEMAPPINGS["EUDatagrid"]="EUDatagrid"
LICENSEMAPPINGS["EUPL-1.0"]="EUPL-1.0"
LICENSEMAPPINGS["EUPL-1.1"]="EUPL-1.1"
LICENSEMAPPINGS["EUPL-1.2"]="EUPL-1.2"
LICENSEMAPPINGS["Eurosym"]="Eurosym"
LICENSEMAPPINGS["Fair"]="Fair"
LICENSEMAPPINGS["Frameworx-1.0"]="Frameworx-1.0"
LICENSEMAPPINGS["FreeBSD-DOC"]="FreeBSD-DOC"
LICENSEMAPPINGS["FreeImage"]="FreeImage"
LICENSEMAPPINGS["FSFAP"]="FSFAP"
LICENSEMAPPINGS["FSFUL"]="FSFUL"
LICENSEMAPPINGS["FSFULLR"]="FSFULLR"
LICENSEMAPPINGS["FTL"]="FTL"
LICENSEMAPPINGS["GD"]="GD"
LICENSEMAPPINGS["GFDL-1.1-invariants-only"]="GFDL-1.1-invariants-only"
LICENSEMAPPINGS["GFDL-1.1-invariants-or-later"]="GFDL-1.1-invariants-or-later"
LICENSEMAPPINGS["GFDL-1.1-no-invariants-only"]="GFDL-1.1-no-invariants-only"
LICENSEMAPPINGS["GFDL-1.1-no-invariants-or-later"]="GFDL-1.1-no-invariants-or-later"
LICENSEMAPPINGS["GFDL-1.1-only"]="GFDL-1.1-only"
LICENSEMAPPINGS["GFDL-1.1-or-later"]="GFDL-1.1-or-later"
LICENSEMAPPINGS["GFDL-1.2-invariants-only"]="GFDL-1.2-invariants-only"
LICENSEMAPPINGS["GFDL-1.2-invariants-or-later"]="GFDL-1.2-invariants-or-later"
LICENSEMAPPINGS["GFDL-1.2-no-invariants-only"]="GFDL-1.2-no-invariants-only"
LICENSEMAPPINGS["GFDL-1.2-no-invariants-or-later"]="GFDL-1.2-no-invariants-or-later"
LICENSEMAPPINGS["GFDL-1.2-only"]="GFDL-1.2-only"
LICENSEMAPPINGS["GFDL-1.2-or-later"]="GFDL-1.2-or-later"
LICENSEMAPPINGS["GFDL-1.3-invariants-only"]="GFDL-1.3-invariants-only"
LICENSEMAPPINGS["GFDL-1.3-invariants-or-later"]="GFDL-1.3-invariants-or-later"
LICENSEMAPPINGS["GFDL-1.3-no-invariants-only"]="GFDL-1.3-no-invariants-only"
LICENSEMAPPINGS["GFDL-1.3-no-invariants-or-later"]="GFDL-1.3-no-invariants-or-later"
LICENSEMAPPINGS["GFDL-1.3-only"]="GFDL-1.3-only"
LICENSEMAPPINGS["GFDL-1.3-or-later"]="GFDL-1.3-or-later"
LICENSEMAPPINGS["Giftware"]="Giftware"
LICENSEMAPPINGS["GL2PS"]="GL2PS"
LICENSEMAPPINGS["Glide"]="Glide"
LICENSEMAPPINGS["Glulxe"]="Glulxe"
LICENSEMAPPINGS["GLWTPL"]="GLWTPL"
LICENSEMAPPINGS["gnuplot"]="gnuplot"
LICENSEMAPPINGS["GPL-1.0-only"]="GPL-1.0-only"
LICENSEMAPPINGS["GPL-1.0-or-later"]="GPL-1.0-or-later"
LICENSEMAPPINGS["GPL-2.0-only"]="GPL-2.0-only"
LICENSEMAPPINGS["GPL-2.0-or-later"]="GPL-2.0-or-later"
LICENSEMAPPINGS["GPL-3.0-only"]="GPL-3.0-only"
LICENSEMAPPINGS["GPL-3.0-or-later"]="GPL-3.0-or-later"
LICENSEMAPPINGS["gSOAP-1.3b"]="gSOAP-1.3b"
LICENSEMAPPINGS["HaskellReport"]="HaskellReport"
LICENSEMAPPINGS["Hippocratic-2.1"]="Hippocratic-2.1"
LICENSEMAPPINGS["HPND"]="HPND"
LICENSEMAPPINGS["HPND-sell-variant"]="HPND-sell-variant"
LICENSEMAPPINGS["HTMLTIDY"]="HTMLTIDY"
LICENSEMAPPINGS["IBM-pibs"]="IBM-pibs"
LICENSEMAPPINGS["ICU"]="ICU"
LICENSEMAPPINGS["IJG"]="IJG"
LICENSEMAPPINGS["ImageMagick"]="ImageMagick"
LICENSEMAPPINGS["iMatix"]="iMatix"
LICENSEMAPPINGS["Imlib2"]="Imlib2"
LICENSEMAPPINGS["Info-ZIP"]="Info-ZIP"
LICENSEMAPPINGS["Intel"]="Intel"
LICENSEMAPPINGS["Intel-ACPI"]="Intel-ACPI"
LICENSEMAPPINGS["Interbase-1.0"]="Interbase-1.0"
LICENSEMAPPINGS["IPA"]="IPA"
LICENSEMAPPINGS["IPL-1.0"]="IPL-1.0"
LICENSEMAPPINGS["ISC"]="ISC"
LICENSEMAPPINGS["JasPer-2.0"]="JasPer-2.0"
LICENSEMAPPINGS["JPNIC"]="JPNIC"
LICENSEMAPPINGS["JSON"]="JSON"
LICENSEMAPPINGS["LAL-1.2"]="LAL-1.2"
LICENSEMAPPINGS["LAL-1.3"]="LAL-1.3"
LICENSEMAPPINGS["Latex2e"]="Latex2e"
LICENSEMAPPINGS["Leptonica"]="Leptonica"
LICENSEMAPPINGS["LGPL-2.0-only"]="LGPL-2.0-only"
LICENSEMAPPINGS["LGPL-2.0-or-later"]="LGPL-2.0-or-later"
LICENSEMAPPINGS["LGPL-2.1-only"]="LGPL-2.1-only"
LICENSEMAPPINGS["LGPL-2.1-or-later"]="LGPL-2.1-or-later"
LICENSEMAPPINGS["LGPL-3.0-only"]="LGPL-3.0-only"
LICENSEMAPPINGS["LGPL-3.0-or-later"]="LGPL-3.0-or-later"
LICENSEMAPPINGS["LGPLLR"]="LGPLLR"
LICENSEMAPPINGS["Libpng"]="Libpng"
LICENSEMAPPINGS["libpng-2.0"]="libpng-2.0"
LICENSEMAPPINGS["libselinux-1.0"]="libselinux-1.0"
LICENSEMAPPINGS["libtiff"]="libtiff"
LICENSEMAPPINGS["LiLiQ-P-1.1"]="LiLiQ-P-1.1"
LICENSEMAPPINGS["LiLiQ-R-1.1"]="LiLiQ-R-1.1"
LICENSEMAPPINGS["LiLiQ-Rplus-1.1"]="LiLiQ-Rplus-1.1"
LICENSEMAPPINGS["Linux-OpenIB"]="Linux-OpenIB"
LICENSEMAPPINGS["LPL-1.0"]="LPL-1.0"
LICENSEMAPPINGS["LPL-1.02"]="LPL-1.02"
LICENSEMAPPINGS["LPPL-1.0"]="LPPL-1.0"
LICENSEMAPPINGS["LPPL-1.1"]="LPPL-1.1"
LICENSEMAPPINGS["LPPL-1.2"]="LPPL-1.2"
LICENSEMAPPINGS["LPPL-1.3a"]="LPPL-1.3a"
LICENSEMAPPINGS["LPPL-1.3c"]="LPPL-1.3c"
LICENSEMAPPINGS["MakeIndex"]="MakeIndex"
LICENSEMAPPINGS["MirOS"]="MirOS"
LICENSEMAPPINGS["MIT"]="MIT"
LICENSEMAPPINGS["MIT-0"]="MIT-0"
LICENSEMAPPINGS["MIT-advertising"]="MIT-advertising"
LICENSEMAPPINGS["MIT-CMU"]="MIT-CMU"
LICENSEMAPPINGS["MIT-enna"]="MIT-enna"
LICENSEMAPPINGS["MIT-feh"]="MIT-feh"
LICENSEMAPPINGS["MIT-Modern-Variant"]="MIT-Modern-Variant"
LICENSEMAPPINGS["MIT-open-group"]="MIT-open-group"
LICENSEMAPPINGS["MITNFA"]="MITNFA"
LICENSEMAPPINGS["Motosoto"]="Motosoto"
LICENSEMAPPINGS["mpich2"]="mpich2"
LICENSEMAPPINGS["MPL-1.0"]="MPL-1.0"
LICENSEMAPPINGS["MPL-1.1"]="MPL-1.1"
LICENSEMAPPINGS["MPL-2.0"]="MPL-2.0"
LICENSEMAPPINGS["MPL-2.0-no-copyleft-exception"]="MPL-2.0-no-copyleft-exception"
LICENSEMAPPINGS["MS-PL"]="MS-PL"
LICENSEMAPPINGS["MS-RL"]="MS-RL"
LICENSEMAPPINGS["MTLL"]="MTLL"
LICENSEMAPPINGS["MulanPSL-1.0"]="MulanPSL-1.0"
LICENSEMAPPINGS["MulanPSL-2.0"]="MulanPSL-2.0"
LICENSEMAPPINGS["Multics"]="Multics"
LICENSEMAPPINGS["Mup"]="Mup"
LICENSEMAPPINGS["NAIST-2003"]="NAIST-2003"
LICENSEMAPPINGS["NASA-1.3"]="NASA-1.3"
LICENSEMAPPINGS["Naumen"]="Naumen"
LICENSEMAPPINGS["NBPL-1.0"]="NBPL-1.0"
LICENSEMAPPINGS["NCGL-UK-2.0"]="NCGL-UK-2.0"
LICENSEMAPPINGS["NCSA"]="NCSA"
LICENSEMAPPINGS["Net-SNMP"]="Net-SNMP"
LICENSEMAPPINGS["NetCDF"]="NetCDF"
LICENSEMAPPINGS["Newsletr"]="Newsletr"
LICENSEMAPPINGS["NGPL"]="NGPL"
LICENSEMAPPINGS["NIST-PD"]="NIST-PD"
LICENSEMAPPINGS["NIST-PD-fallback"]="NIST-PD-fallback"
LICENSEMAPPINGS["NLOD-1.0"]="NLOD-1.0"
LICENSEMAPPINGS["NLPL"]="NLPL"
LICENSEMAPPINGS["Nokia"]="Nokia"
LICENSEMAPPINGS["NOSL"]="NOSL"
LICENSEMAPPINGS["Noweb"]="Noweb"
LICENSEMAPPINGS["NPL-1.0"]="NPL-1.0"
LICENSEMAPPINGS["NPL-1.1"]="NPL-1.1"
LICENSEMAPPINGS["NPOSL-3.0"]="NPOSL-3.0"
LICENSEMAPPINGS["NRL"]="NRL"
LICENSEMAPPINGS["NTP"]="NTP"
LICENSEMAPPINGS["NTP-0"]="NTP-0"
LICENSEMAPPINGS["O-UDA-1.0"]="O-UDA-1.0"
LICENSEMAPPINGS["OCCT-PL"]="OCCT-PL"
LICENSEMAPPINGS["OCLC-2.0"]="OCLC-2.0"
LICENSEMAPPINGS["ODbL-1.0"]="ODbL-1.0"
LICENSEMAPPINGS["ODC-By-1.0"]="ODC-By-1.0"
LICENSEMAPPINGS["OFL-1.0"]="OFL-1.0"
LICENSEMAPPINGS["OFL-1.0-no-RFN"]="OFL-1.0-no-RFN"
LICENSEMAPPINGS["OFL-1.0-RFN"]="OFL-1.0-RFN"
LICENSEMAPPINGS["OFL-1.1"]="OFL-1.1"
LICENSEMAPPINGS["OFL-1.1-no-RFN"]="OFL-1.1-no-RFN"
LICENSEMAPPINGS["OFL-1.1-RFN"]="OFL-1.1-RFN"
LICENSEMAPPINGS["OGC-1.0"]="OGC-1.0"
LICENSEMAPPINGS["OGDL-Taiwan-1.0"]="OGDL-Taiwan-1.0"
LICENSEMAPPINGS["OGL-Canada-2.0"]="OGL-Canada-2.0"
LICENSEMAPPINGS["OGL-UK-1.0"]="OGL-UK-1.0"
LICENSEMAPPINGS["OGL-UK-2.0"]="OGL-UK-2.0"
LICENSEMAPPINGS["OGL-UK-3.0"]="OGL-UK-3.0"
LICENSEMAPPINGS["OGTSL"]="OGTSL"
LICENSEMAPPINGS["OLDAP-1.1"]="OLDAP-1.1"
LICENSEMAPPINGS["OLDAP-1.2"]="OLDAP-1.2"
LICENSEMAPPINGS["OLDAP-1.3"]="OLDAP-1.3"
LICENSEMAPPINGS["OLDAP-1.4"]="OLDAP-1.4"
LICENSEMAPPINGS["OLDAP-2.0"]="OLDAP-2.0"
LICENSEMAPPINGS["OLDAP-2.0.1"]="OLDAP-2.0.1"
LICENSEMAPPINGS["OLDAP-2.1"]="OLDAP-2.1"
LICENSEMAPPINGS["OLDAP-2.2"]="OLDAP-2.2"
LICENSEMAPPINGS["OLDAP-2.2.1"]="OLDAP-2.2.1"
LICENSEMAPPINGS["OLDAP-2.2.2"]="OLDAP-2.2.2"
LICENSEMAPPINGS["OLDAP-2.3"]="OLDAP-2.3"
LICENSEMAPPINGS["OLDAP-2.4"]="OLDAP-2.4"
LICENSEMAPPINGS["OLDAP-2.5"]="OLDAP-2.5"
LICENSEMAPPINGS["OLDAP-2.6"]="OLDAP-2.6"
LICENSEMAPPINGS["OLDAP-2.7"]="OLDAP-2.7"
LICENSEMAPPINGS["OLDAP-2.8"]="OLDAP-2.8"
LICENSEMAPPINGS["OML"]="OML"
LICENSEMAPPINGS["OpenSSL"]="OpenSSL"
LICENSEMAPPINGS["OPL-1.0"]="OPL-1.0"
LICENSEMAPPINGS["OSET-PL-2.1"]="OSET-PL-2.1"
LICENSEMAPPINGS["OSL-1.0"]="OSL-1.0"
LICENSEMAPPINGS["OSL-1.1"]="OSL-1.1"
LICENSEMAPPINGS["OSL-2.0"]="OSL-2.0"
LICENSEMAPPINGS["OSL-2.1"]="OSL-2.1"
LICENSEMAPPINGS["OSL-3.0"]="OSL-3.0"
LICENSEMAPPINGS["Parity-6.0.0"]="Parity-6.0.0"
LICENSEMAPPINGS["Parity-7.0.0"]="Parity-7.0.0"
LICENSEMAPPINGS["PDDL-1.0"]="PDDL-1.0"
LICENSEMAPPINGS["PHP-3.0"]="PHP-3.0"
LICENSEMAPPINGS["PHP-3.01"]="PHP-3.01"
LICENSEMAPPINGS["Plexus"]="Plexus"
LICENSEMAPPINGS["PolyForm-Noncommercial-1.0.0"]="PolyForm-Noncommercial-1.0.0"
LICENSEMAPPINGS["PolyForm-Small-Business-1.0.0"]="PolyForm-Small-Business-1.0.0"
LICENSEMAPPINGS["PostgreSQL"]="PostgreSQL"
LICENSEMAPPINGS["PSF-2.0"]="PSF-2.0"
LICENSEMAPPINGS["psfrag"]="psfrag"
LICENSEMAPPINGS["psutils"]="psutils"
LICENSEMAPPINGS["Python-2.0"]="Python-2.0"
LICENSEMAPPINGS["Qhull"]="Qhull"
LICENSEMAPPINGS["QPL-1.0"]="QPL-1.0"
LICENSEMAPPINGS["Rdisc"]="Rdisc"
LICENSEMAPPINGS["RHeCos-1.1"]="RHeCos-1.1"
LICENSEMAPPINGS["RPL-1.1"]="RPL-1.1"
LICENSEMAPPINGS["RPL-1.5"]="RPL-1.5"
LICENSEMAPPINGS["RPSL-1.0"]="RPSL-1.0"
LICENSEMAPPINGS["RSA-MD"]="RSA-MD"
LICENSEMAPPINGS["RSCPL"]="RSCPL"
LICENSEMAPPINGS["Ruby"]="Ruby"
LICENSEMAPPINGS["SAX-PD"]="SAX-PD"
LICENSEMAPPINGS["Saxpath"]="Saxpath"
LICENSEMAPPINGS["SCEA"]="SCEA"
LICENSEMAPPINGS["Sendmail"]="Sendmail"
LICENSEMAPPINGS["Sendmail-8.23"]="Sendmail-8.23"
LICENSEMAPPINGS["SGI-B-1.0"]="SGI-B-1.0"
LICENSEMAPPINGS["SGI-B-1.1"]="SGI-B-1.1"
LICENSEMAPPINGS["SGI-B-2.0"]="SGI-B-2.0"
LICENSEMAPPINGS["SHL-0.5"]="SHL-0.5"
LICENSEMAPPINGS["SHL-0.51"]="SHL-0.51"
LICENSEMAPPINGS["SimPL-2.0"]="SimPL-2.0"
LICENSEMAPPINGS["SISSL"]="SISSL"
LICENSEMAPPINGS["SISSL-1.2"]="SISSL-1.2"
LICENSEMAPPINGS["Sleepycat"]="Sleepycat"
LICENSEMAPPINGS["SMLNJ"]="SMLNJ"
LICENSEMAPPINGS["SMPPL"]="SMPPL"
LICENSEMAPPINGS["SNIA"]="SNIA"
LICENSEMAPPINGS["Spencer-86"]="Spencer-86"
LICENSEMAPPINGS["Spencer-94"]="Spencer-94"
LICENSEMAPPINGS["Spencer-99"]="Spencer-99"
LICENSEMAPPINGS["SPL-1.0"]="SPL-1.0"
LICENSEMAPPINGS["SSH-OpenSSH"]="SSH-OpenSSH"
LICENSEMAPPINGS["SSH-short"]="SSH-short"
LICENSEMAPPINGS["SSPL-1.0"]="SSPL-1.0"
LICENSEMAPPINGS["SugarCRM-1.1.3"]="SugarCRM-1.1.3"
LICENSEMAPPINGS["SWL"]="SWL"
LICENSEMAPPINGS["TAPR-OHL-1.0"]="TAPR-OHL-1.0"
LICENSEMAPPINGS["TCL"]="TCL"
LICENSEMAPPINGS["TCP-wrappers"]="TCP-wrappers"
LICENSEMAPPINGS["TMate"]="TMate"
LICENSEMAPPINGS["TORQUE-1.1"]="TORQUE-1.1"
LICENSEMAPPINGS["TOSL"]="TOSL"
LICENSEMAPPINGS["TU-Berlin-1.0"]="TU-Berlin-1.0"
LICENSEMAPPINGS["TU-Berlin-2.0"]="TU-Berlin-2.0"
LICENSEMAPPINGS["UCL-1.0"]="UCL-1.0"
LICENSEMAPPINGS["Unicode-DFS-2015"]="Unicode-DFS-2015"
LICENSEMAPPINGS["Unicode-DFS-2016"]="Unicode-DFS-2016"
LICENSEMAPPINGS["Unicode-TOU"]="Unicode-TOU"
LICENSEMAPPINGS["Unlicense"]="Unlicense"
LICENSEMAPPINGS["UPL-1.0"]="UPL-1.0"
LICENSEMAPPINGS["Vim"]="Vim"
LICENSEMAPPINGS["VOSTROM"]="VOSTROM"
LICENSEMAPPINGS["VSL-1.0"]="VSL-1.0"
LICENSEMAPPINGS["W3C"]="W3C"
LICENSEMAPPINGS["W3C-19980720"]="W3C-19980720"
LICENSEMAPPINGS["W3C-20150513"]="W3C-20150513"
LICENSEMAPPINGS["Watcom-1.0"]="Watcom-1.0"
LICENSEMAPPINGS["Wsuipa"]="Wsuipa"
LICENSEMAPPINGS["WTFPL"]="WTFPL"
LICENSEMAPPINGS["X11"]="X11"
LICENSEMAPPINGS["Xerox"]="Xerox"
LICENSEMAPPINGS["XFree86-1.1"]="XFree86-1.1"
LICENSEMAPPINGS["xinetd"]="xinetd"
LICENSEMAPPINGS["Xnet"]="Xnet"
LICENSEMAPPINGS["xpp"]="xpp"
LICENSEMAPPINGS["XSkat"]="XSkat"
LICENSEMAPPINGS["YPL-1.0"]="YPL-1.0"
LICENSEMAPPINGS["YPL-1.1"]="YPL-1.1"
LICENSEMAPPINGS["Zed"]="Zed"
LICENSEMAPPINGS["Zend-2.0"]="Zend-2.0"
LICENSEMAPPINGS["Zimbra-1.3"]="Zimbra-1.3"
LICENSEMAPPINGS["Zimbra-1.4"]="Zimbra-1.4"
LICENSEMAPPINGS["Zlib"]="Zlib"
LICENSEMAPPINGS["zlib-acknowledgement"]="zlib-acknowledgement"
LICENSEMAPPINGS["ZPL-1.1"]="ZPL-1.1"
LICENSEMAPPINGS["ZPL-2.0"]="ZPL-2.0"
LICENSEMAPPINGS["ZPL-2.1"]="ZPL-2.1"
# PDL Mappings
LICENSEMAPPINGS["389 Directory Server Exception"]="389-exception"
LICENSEMAPPINGS["3dfx Glide License"]="Glide"
LICENSEMAPPINGS["3DFX GLIDE Source Code General Public License"]="Glide"
LICENSEMAPPINGS["Abstyles License"]="Abstyles"
LICENSEMAPPINGS["Academic Free License v1.1"]="AFL-1.1"
LICENSEMAPPINGS["Academic Free License v1.2"]="AFL-1.2"
LICENSEMAPPINGS["Academic Free License v2.0"]="AFL-2.0"
LICENSEMAPPINGS["Academic Free License v2.1"]="AFL-2.1"
LICENSEMAPPINGS["Academic Free License v3.0"]="AFL-3.0"
LICENSEMAPPINGS["Academy of Motion Picture Arts and Sciences BSD"]="AMPAS"
LICENSEMAPPINGS["Adaptive Public License 1.0"]="APL-1.0"
LICENSEMAPPINGS["Adobe Glyph List License"]="Adobe-Glyph"
LICENSEMAPPINGS["Adobe Postscript AFM License"]="APAFML"
LICENSEMAPPINGS["Adobe Systems Incorporated Source Code License Agreement"]="Adobe-2006"
LICENSEMAPPINGS["Affero General Public License v1.0"]="AGPL-1.0"
LICENSEMAPPINGS["Afmparse License"]="Afmparse"
LICENSEMAPPINGS["Aladdin Free Public License v8"]="Aladdin"
LICENSEMAPPINGS["Allegro Giftware License"]="Giftware"
LICENSEMAPPINGS["Amazon Digital Services License"]="ADSL"
LICENSEMAPPINGS["AMD's plpa_map.c License"]="AMDPLPA"
LICENSEMAPPINGS["ANTLR Software Rights Notice"]="ANTLR-PD"
LICENSEMAPPINGS["Apache License 1.0"]="Apache-1.0"
LICENSEMAPPINGS["Apache License 1.1"]="Apache-1.1"
LICENSEMAPPINGS["Apache License 2.0"]="Apache-2.0"
LICENSEMAPPINGS["Apple MIT License"]="AML"
LICENSEMAPPINGS["Apple Public Source License 1.0"]="APSL-1.0"
LICENSEMAPPINGS["Apple Public Source License 1.1"]="APSL-1.1"
LICENSEMAPPINGS["Apple Public Source License 1.2"]="APSL-1.2"
LICENSEMAPPINGS["Apple Public Source License 2.0"]="APSL-2.0"
LICENSEMAPPINGS["Artistic License 1.0"]="Artistic-1.0"
LICENSEMAPPINGS["Artistic License 1.0 w/clause 8"]="Artistic-1.0-cl8"
LICENSEMAPPINGS["Artistic License 2.0"]="Artistic-2.0"
LICENSEMAPPINGS["Attribution Assurance License"]="AAL"
LICENSEMAPPINGS["Autoconf exception 2.0"]="Autoconf-exception-2.0"
LICENSEMAPPINGS["Autoconf exception 3.0"]="Autoconf-exception-3.0"
LICENSEMAPPINGS["Bahyph License"]="Bahyph"
LICENSEMAPPINGS["Barr License"]="Barr"
LICENSEMAPPINGS["Bison exception 2.2"]="Bison-exception-2.2"
LICENSEMAPPINGS["BitTorrent Open Source License v1.0"]="BitTorrent-1.0"
LICENSEMAPPINGS["BitTorrent Open Source License v1.1"]="BitTorrent-1.1"
LICENSEMAPPINGS["Boost Software License 1.0"]="BSL-1.0"
LICENSEMAPPINGS["Bootloader Distribution Exception"]="Bootloader-exception"
LICENSEMAPPINGS["Borceux license"]="Borceux"
LICENSEMAPPINGS["BSD 1-Clause License"]="BSD-1-Clause"
LICENSEMAPPINGS["BSD 2-Clause \"Simplified\" License"]="BSD-2-Clause"
LICENSEMAPPINGS["BSD 2-clause \"Simplified\" or \"FreeBSD\" License"]="BSD-2-Clause"
LICENSEMAPPINGS["BSD 2-clause FreeBSD License"]="BSD-2-Clause-FreeBSD"
LICENSEMAPPINGS["BSD 2-clause NetBSD License"]="BSD-2-Clause-NetBSD"
LICENSEMAPPINGS["BSD 3-clause \"New\" or \"Revised\" License"]="BSD-3-Clause"
LICENSEMAPPINGS["BSD 3-Clause \"New\" or \"Revised\" License"]="BSD-3-Clause"
LICENSEMAPPINGS["BSD-Style License"]="BSD-3-Clause"
LICENSEMAPPINGS["BSD 3-clause Clear License"]="BSD-3-Clause-Clear"
LICENSEMAPPINGS["BSD 3-Clause No Nuclear License"]="BSD-3-Clause-No-Nuclear-License"
LICENSEMAPPINGS["BSD 3-Clause No Nuclear License 2014"]="BSD-3-Clause-No-Nuclear-License-2014"
LICENSEMAPPINGS["BSD 3-Clause No Nuclear Warranty"]="BSD-3-Clause-No-Nuclear-Warranty"
LICENSEMAPPINGS["BSD 4-clause \"Original\" or \"Old\" License"]="BSD-4-Clause"
LICENSEMAPPINGS["BSD Protection License"]="BSD-Protection"
LICENSEMAPPINGS["BSD Source Code Attribution"]="BSD-Source-Code"
LICENSEMAPPINGS["BSD with attribution"]="BSD-3-Clause-Attribution"
LICENSEMAPPINGS["BSD Zero Clause License"]="0BSD"
LICENSEMAPPINGS["BSD-2-Clause Plus Patent License"]="BSD-2-Clause-Patent"
LICENSEMAPPINGS["BSD-4-Clause (University of California-Specific)"]="BSD-4-Clause-UC"
LICENSEMAPPINGS["bzip2 and libbzip2 License v1.0.5"]="bzip2-1.0.5"
LICENSEMAPPINGS["bzip2 and libbzip2 License v1.0.6"]="bzip2-1.0.6"
LICENSEMAPPINGS["Caldera License"]="Caldera"
LICENSEMAPPINGS["CeCILL Free Software License Agreement v1.0"]="CECILL-1.0"
LICENSEMAPPINGS["CeCILL Free Software License Agreement v1.1"]="CECILL-1.1"
LICENSEMAPPINGS["CeCILL Free Software License Agreement v2.0"]="CECILL-2.0"
LICENSEMAPPINGS["CeCILL Free Software License Agreement v2.1"]="CECILL-2.1"
LICENSEMAPPINGS["CeCILL-B Free Software License Agreement v1.0"]="CECILL-B"
LICENSEMAPPINGS["CeCILL-C Free Software License Agreement v1.0"]="CECILL-C"
LICENSEMAPPINGS["Clarified Artistic License"]="ClArtistic"
LICENSEMAPPINGS["Classpath exception 2.0"]="Classpath-exception-2.0"
LICENSEMAPPINGS["CLISP exception 2.0"]="CLISP-exception-2.0"
LICENSEMAPPINGS["CMU License"]="MIT-CMU"
LICENSEMAPPINGS["CNRI Python License"]="CNRI-Python"
LICENSEMAPPINGS["CNRI Python Open Source GPL Compatible License Agreement"]="CNRI-Python-GPL-Compatible"
LICENSEMAPPINGS["Common Development and Distribution License"]="CDDL-1.1"
LICENSEMAPPINGS["Common Development and Distribution License 1.0"]="CDDL-1.0"
LICENSEMAPPINGS["Common Development and Distribution License 1.1"]="CDDL-1.1"
LICENSEMAPPINGS["Common Public Attribution License 1.0"]="CPAL-1.0"
LICENSEMAPPINGS["Common Public License"]="CPL-1.0"
LICENSEMAPPINGS["Common Public License 1.0"]="CPL-1.0"
LICENSEMAPPINGS["Community Data License Agreement Permissive 1.0"]="CDLA-Permissive-1.0"
LICENSEMAPPINGS["Community Data License Agreement Sharing 1.0"]="CDLA-Sharing-1.0"
LICENSEMAPPINGS["Computer Associates Trusted Open Source License 1.1"]="CATOSL-1.1"
LICENSEMAPPINGS["Condor Public License v1.1"]="Condor-1.1"
LICENSEMAPPINGS["Creative Commons Attribution 1.0"]="CC-BY-1.0"
LICENSEMAPPINGS["Creative Commons Attribution 2.0"]="CC-BY-2.0"
LICENSEMAPPINGS["Creative Commons Attribution 2.5"]="CC-BY-2.5"
LICENSEMAPPINGS["Creative Commons Attribution 3.0"]="CC-BY-3.0"
LICENSEMAPPINGS["Creative Commons Attribution 3.0 Unported"]="CC-BY-3.0"
LICENSEMAPPINGS["Creative Commons Attribution 4.0"]="CC-BY-4.0"
LICENSEMAPPINGS["Creative Commons Attribution No Derivatives 1.0"]="CC-BY-ND-1.0"
LICENSEMAPPINGS["Creative Commons Attribution No Derivatives 2.0"]="CC-BY-ND-2.0"
LICENSEMAPPINGS["Creative Commons Attribution No Derivatives 2.5"]="CC-BY-ND-2.5"
LICENSEMAPPINGS["Creative Commons Attribution No Derivatives 3.0"]="CC-BY-ND-3.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial 1.0"]="CC-BY-NC-1.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial 2.0"]="CC-BY-NC-2.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial 2.5"]="CC-BY-NC-2.5"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial 3.0"]="CC-BY-NC-3.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial No Derivatives 1.0"]="CC-BY-NC-ND-1.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial No Derivatives 2.0"]="CC-BY-NC-ND-2.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial No Derivatives 2.5"]="CC-BY-NC-ND-2.5"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial No Derivatives 3.0"]="CC-BY-NC-ND-3.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial Share Alike 1.0"]="CC-BY-NC-SA-1.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial Share Alike 2.0"]="CC-BY-NC-SA-2.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial Share Alike 2.5"]="CC-BY-NC-SA-2.5"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial Share Alike 3.0"]="CC-BY-NC-SA-3.0"
LICENSEMAPPINGS["Creative Commons Attribution Share Alike 1.0"]="CC-BY-SA-1.0"
LICENSEMAPPINGS["Creative Commons Attribution Share Alike 2.0"]="CC-BY-SA-2.0"
LICENSEMAPPINGS["Creative Commons Attribution Share Alike 2.5"]="CC-BY-SA-2.5"
LICENSEMAPPINGS["Creative Commons Attribution Share Alike 3.0"]="CC-BY-SA-3.0"
LICENSEMAPPINGS["Creative Commons Attribution Share Alike 4.0"]="CC-BY-SA-4.0"
LICENSEMAPPINGS["Creative Commons Attribution-NoDerivatives 4.0"]="CC-BY-ND-4.0"
LICENSEMAPPINGS["Creative Commons Attribution-NonCommercial 4.0"]="CC-BY-NC-4.0"
LICENSEMAPPINGS["Creative Commons Attribution-NonCommercial-NoDerivatives 4.0"]="CC-BY-NC-ND-4.0"
LICENSEMAPPINGS["Creative Commons Attribution-NonCommercial-ShareAlike 4.0"]="CC-BY-NC-SA-4.0"
LICENSEMAPPINGS["Creative Commons CC0 1.0 Universal"]="CC0-1.0"
LICENSEMAPPINGS["Crossword License"]="Crossword"
LICENSEMAPPINGS["CrystalStacker License"]="CrystalStacker"
LICENSEMAPPINGS["CUA Office Public License v1.0"]="CUA-OPL-1.0"
LICENSEMAPPINGS["Cube License"]="Cube"
LICENSEMAPPINGS["Deutsche Freie Software Lizenz"]="D-FSL-1.0"
LICENSEMAPPINGS["diffmark license"]="diffmark"
LICENSEMAPPINGS["DigiRule FOSS License Exception"]="DigiRule-FOSS-exception"
LICENSEMAPPINGS["Do What The Fuck You Want To Public License"]="WTFPL"
LICENSEMAPPINGS["DOC Software License"]="DOC"
LICENSEMAPPINGS["DOC License"]="DOC"
LICENSEMAPPINGS["Dotseqn License"]="Dotseqn"
LICENSEMAPPINGS["DSDP License"]="DSDP"
LICENSEMAPPINGS["dvipdfm License"]="dvipdfm"
LICENSEMAPPINGS["Eclipse Public License 1.0"]="EPL-1.0"
LICENSEMAPPINGS["Eclipse Public License 2.0"]="EPL-2.0"
LICENSEMAPPINGS["eCos exception 2.0"]="eCos-exception-2.0"
LICENSEMAPPINGS["eCos license version 2.0"]="eCos-2.0"
LICENSEMAPPINGS["Educational Community License v1.0"]="ECL-1.0"
LICENSEMAPPINGS["Educational Community License v2.0"]="ECL-2.0"
LICENSEMAPPINGS["eGenix.com Public License 1.1.0"]="eGenix"
LICENSEMAPPINGS["Eiffel Forum License v1.0"]="EFL-1.0"
LICENSEMAPPINGS["Eiffel Forum License v2.0"]="EFL-2.0"
LICENSEMAPPINGS["Enlightenment License (e16)"]="MIT-advertising"
LICENSEMAPPINGS["enna License"]="MIT-enna"
LICENSEMAPPINGS["Entessa Public License"]="Entessa"
LICENSEMAPPINGS["Erlang Public License v1.1"]="ErlPL-1.1"
LICENSEMAPPINGS["EU DataGrid Software License"]="EUDatagrid"
LICENSEMAPPINGS["European Union Public License 1.0"]="EUPL-1.0"
LICENSEMAPPINGS["European Union Public License 1.1"]="EUPL-1.1"
LICENSEMAPPINGS["European Union Public License 1.2"]="EUPL-1.2"
LICENSEMAPPINGS["Eurosym License v2"]="Eurosym"
LICENSEMAPPINGS["Fair License"]="Fair"
LICENSEMAPPINGS["FastCGI"]="OML"
LICENSEMAPPINGS["Fawkes Runtime Exception"]="Fawkes-Runtime-exception"
LICENSEMAPPINGS["feh License"]="MIT-feh"
LICENSEMAPPINGS["FLTK exception"]="FLTK-exception"
LICENSEMAPPINGS["Font exception 2.0"]="Font-exception-2.0"
LICENSEMAPPINGS["Frameworx Open License 1.0"]="Frameworx-1.0"
LICENSEMAPPINGS["FreeImage Public License v1.0"]="FreeImage"
LICENSEMAPPINGS["FreeRTOS Exception 2.0"]="freertos-exception-2.0"
LICENSEMAPPINGS["FreeType License"]="FTL"
LICENSEMAPPINGS["FSF All Permissive License"]="FSFAP"
LICENSEMAPPINGS["FSF Unlimited License"]="FSFUL"
LICENSEMAPPINGS["FSF Unlimited License (with License Retention)"]="FSFULLR"
LICENSEMAPPINGS["GCC Runtime Library exception 2.0"]="GCC-exception-2.0"
LICENSEMAPPINGS["GCC Runtime Library exception 3.1"]="GCC-exception-3.1"
LICENSEMAPPINGS["GL2PS License, Version 2"]="GL2PS"
LICENSEMAPPINGS["Glulxe License"]="Glulxe"
LICENSEMAPPINGS["GNU Affero General Public License v3.0"]="AGPL-3.0"
LICENSEMAPPINGS["GNU Free Documentation License v1.1"]="GFDL-1.1"
LICENSEMAPPINGS["GNU Free Documentation License v1.2"]="GFDL-1.2"
LICENSEMAPPINGS["GNU Free Documentation License v1.3"]="GFDL-1.3"
LICENSEMAPPINGS["GNU General Public License v1.0"]="GPL-1.0-only"
LICENSEMAPPINGS["GNU General Public License v1.0 only"]="GPL-1.0-only"
LICENSEMAPPINGS["GNU General Public License v1.0 or later"]="GPL-1.0-or-later"
LICENSEMAPPINGS["GNU General Public License v2.0"]="GPL-2.0-only"
LICENSEMAPPINGS["GNU General Public License v2.0 only"]="GPL-2.0-only"
LICENSEMAPPINGS["GNU General Public License v2.0 or later"]="GPL-2.0-or-later"
LICENSEMAPPINGS["GNU General Public License v2.0 w/Autoconf exception"]="GPL-2.0-with-autoconf-exception"
LICENSEMAPPINGS["GNU General Public License v2.0 w/Bison exception"]="GPL-2.0-with-bison-exception"
LICENSEMAPPINGS["GNU General Public License v2.0 w/Font exception"]="GPL-2.0-with-font-exception"
LICENSEMAPPINGS["GNU General Public License v2.0 w/GCC Runtime Library exception"]="GPL-2.0-with-GCC-exception"
LICENSEMAPPINGS["GNU General Public License v2.0 with Classpath Exception"]="GPL-2.0-with-classpath-exception"
LICENSEMAPPINGS["GNU General Public License v3.0"]="GPL-3.0-only"
LICENSEMAPPINGS["GNU General Public License v3.0 only"]="GPL-3.0-only"
LICENSEMAPPINGS["GNU General Public License v3.0 or later"]="GPL-3.0-or-later"
LICENSEMAPPINGS["GNU General Public License v3.0 w/Autoconf exception"]="GPL-3.0-with-autoconf-exception"
LICENSEMAPPINGS["GNU General Public License v3.0 w/GCC Runtime Library exception"]="GPL-3.0-with-GCC-exception"
LICENSEMAPPINGS["GNU JavaMail exception"]="gnu-javamail-exception"
LICENSEMAPPINGS["GNU Lesser General Public License v2.1"]="LGPL-2.1-only"
LICENSEMAPPINGS["GNU Lesser General Public License v2.1 or later"]="LGPL-2.1-or-later"
LICENSEMAPPINGS["GNU Lesser General Public License v3.0"]="LGPL-3.0-only"
LICENSEMAPPINGS["GNU Lesser General Public License v3.0 only"]="LGPL-3.0-only"
LICENSEMAPPINGS["GNU Lesser General Public License v3.0 or later"]="LGPL-3.0-or-later"
LICENSEMAPPINGS["GNU Library General Public License v2.0"]="LGPL-2.0-only"
LICENSEMAPPINGS["GNU Library General Public License v2.0 only"]="LGPL-2.0-only"
LICENSEMAPPINGS["GNU Library General Public License v2.0 or later"]="LGPL-2.0-or-later"
LICENSEMAPPINGS["gnuplot License"]="gnuplot"
LICENSEMAPPINGS["gSOAP Public License v1.3b"]="gSOAP-1.3b"
LICENSEMAPPINGS["Haskell Language Report License"]="HaskellReport"
LICENSEMAPPINGS["Historic Permission Notice and Disclaimer"]="HPND"
LICENSEMAPPINGS["i2p GPL+Java Exception"]="i2p-gpl-java-exception"
LICENSEMAPPINGS["IBM PowerPC Initialization and Boot Software"]="IBM-pibs"
LICENSEMAPPINGS["IBM Public License v1.0"]="IPL-1.0"
LICENSEMAPPINGS["ICU License"]="ICU"
LICENSEMAPPINGS["ImageMagick (Apache 2.0) License"]="ImageMagick"
LICENSEMAPPINGS["iMatix Standard Function Library Agreement"]="iMatix"
LICENSEMAPPINGS["Imlib2 License"]="Imlib2"
LICENSEMAPPINGS["Independent JPEG Group License"]="IJG"
LICENSEMAPPINGS["Info-ZIP License"]="Info-ZIP"
LICENSEMAPPINGS["Intel ACPI Software License Agreement"]="Intel-ACPI"
LICENSEMAPPINGS["Intel Open Source License"]="Intel"
LICENSEMAPPINGS["Interbase Public License v1.0"]="Interbase-1.0"
LICENSEMAPPINGS["IPA Font License"]="IPA"
LICENSEMAPPINGS["ISC License"]="ISC"
LICENSEMAPPINGS["ISC License (ISC)"]="ISC"
LICENSEMAPPINGS["JasPer License Version 2.0"]="JasPer-2.0"
LICENSEMAPPINGS["Jython License"]="CNRI-Jython"
LICENSEMAPPINGS["LaTeX Project Public License v1.0"]="LPPL-1.0"
LICENSEMAPPINGS["LaTeX Project Public License v1.1"]="LPPL-1.1"
LICENSEMAPPINGS["LaTeX Project Public License v1.2"]="LPPL-1.2"
LICENSEMAPPINGS["LaTeX Project Public License v1.3a"]="LPPL-1.3a"
LICENSEMAPPINGS["LaTeX Project Public License v1.3c"]="LPPL-1.3c"
LICENSEMAPPINGS["Latex2e License"]="Latex2e"
LICENSEMAPPINGS["Lawrence Berkeley National Labs BSD variant license"]="BSD-3-Clause-LBNL"
LICENSEMAPPINGS["Leptonica License"]="Leptonica"
LICENSEMAPPINGS["Lesser General Public License For Linguistic Resources"]="LGPLLR"
LICENSEMAPPINGS["libpng License"]="Libpng"
LICENSEMAPPINGS["libtiff License"]="libtiff"
LICENSEMAPPINGS["Libtool Exception"]="Libtool-exception"
LICENSEMAPPINGS["Licence Art Libre 1.2"]="LAL-1.2"
LICENSEMAPPINGS["Licence Art Libre 1.3"]="LAL-1.3"
LICENSEMAPPINGS["Licence Libre du Québec – Permissive version 1.1"]="LiLiQ-P-1.1"
LICENSEMAPPINGS["Licence Libre du Québec – Réciprocité forte version 1.1"]="LiLiQ-Rplus-1.1"
LICENSEMAPPINGS["Licence Libre du Québec – Réciprocité version 1.1"]="LiLiQ-R-1.1"
LICENSEMAPPINGS["Linux Kernel Variant of OpenIB.org license"]="Linux-OpenIB"
LICENSEMAPPINGS["Linux Syscall Note"]="Linux-syscall-note"
LICENSEMAPPINGS["LLVM Exception"]="LLVM-exception"
LICENSEMAPPINGS["Lucent Public License v1.0"]="LPL-1.0"
LICENSEMAPPINGS["Lucent Public License v1.02 (Plan9)"]="LPL-1.02"
LICENSEMAPPINGS["LZMA exception"]="LZMA-exception"
LICENSEMAPPINGS["Macros and Inline Functions Exception"]="mif-exception"
LICENSEMAPPINGS["MakeIndex License"]="MakeIndex"
LICENSEMAPPINGS["Matrix Template Library License"]="MTLL"
LICENSEMAPPINGS["Microsoft Public License (Ms-PL)"]="MS-PL"
LICENSEMAPPINGS["Microsoft Reciprocal License (Ms-RL)"]="MS-RL"
LICENSEMAPPINGS["MirOS Licence"]="MirOS"
LICENSEMAPPINGS["MIT +no-false-attribs license"]="MITNFA"
LICENSEMAPPINGS["MIT License (Expat)"]="MIT"
LICENSEMAPPINGS["MIT License"]="MIT"
LICENSEMAPPINGS["MIT-Style License"]="MIT"
LICENSEMAPPINGS["MIT No Attribution"]="MIT-0"
LICENSEMAPPINGS["Motosoto License"]="Motosoto"
LICENSEMAPPINGS["Mozilla Public License 1.0"]="MPL-1.0"
LICENSEMAPPINGS["Mozilla Public License 1.1"]="MPL-1.1"
LICENSEMAPPINGS["Mozilla Public License 2.0"]="MPL-2.0"
LICENSEMAPPINGS["Mozilla Public License 2.0 (no copyleft exception)"]="MPL-2.0-no-copyleft-exception"
LICENSEMAPPINGS["MPICH2 License"]="mpich2"
LICENSEMAPPINGS["Multics License"]="Multics"
LICENSEMAPPINGS["Mup License"]="Mup"
LICENSEMAPPINGS["NASA Open Source Agreement 1.3"]="NASA-1.3"
LICENSEMAPPINGS["Naumen Public License"]="Naumen"
LICENSEMAPPINGS["Net Boolean Public License v1"]="NBPL-1.0"
LICENSEMAPPINGS["netCDF License"]="NetCDF"
LICENSEMAPPINGS["Nethack General Public License"]="NGPL"
LICENSEMAPPINGS["Netizen Open Source License v1.0"]="NOSL"
LICENSEMAPPINGS["Netscape Public License 1.0"]="NPL-1.0"
LICENSEMAPPINGS["Netscape Public License 1.1"]="NPL-1.1"
LICENSEMAPPINGS["Net-SNMP License"]="Net-SNMP"
LICENSEMAPPINGS["Newsletr License"]="Newsletr"
LICENSEMAPPINGS["No Limit Public License"]="NLPL"
LICENSEMAPPINGS["Nokia Open Source License"]="Nokia"
LICENSEMAPPINGS["Nokia Qt LGPL exception 1.1"]="Nokia-Qt-exception-1.1"
LICENSEMAPPINGS["Non-Profit Open Software License 3.0"]="NPOSL-3.0"
LICENSEMAPPINGS["Norwegian Licence for Open Government Data"]="NLOD-1.0"
LICENSEMAPPINGS["Noweb License"]="Noweb"
LICENSEMAPPINGS["NTP License"]="NTP"
LICENSEMAPPINGS["Nunit License"]="Nunit"
LICENSEMAPPINGS["OCLC Research Public License 2.0"]="OCLC-2.0"
LICENSEMAPPINGS["ODC Open Database License v1.0"]="ODbL-1.0"
LICENSEMAPPINGS["ODC Public Domain Dedication & License 1.0"]="PDDL-1.0"
LICENSEMAPPINGS["Open CASCADE Exception 1.0"]="OCCT-exception-1.0"
LICENSEMAPPINGS["Open CASCADE Technology Public License"]="OCCT-PL"
LICENSEMAPPINGS["Open Group Test Suite License"]="OGTSL"
LICENSEMAPPINGS["Open LDAP Public License 2.2.2"]="OLDAP-2.2.2"
LICENSEMAPPINGS["Open LDAP Public License v1.1"]="OLDAP-1.1"
LICENSEMAPPINGS["Open LDAP Public License v1.3"]="OLDAP-1.3"
LICENSEMAPPINGS["Open LDAP Public License v1.4"]="OLDAP-1.4"
LICENSEMAPPINGS["Open LDAP Public License v2.0 (or possibly 2.0A and 2.0B)"]="OLDAP-2.0"
LICENSEMAPPINGS["Open LDAP Public License v2.1"]="OLDAP-2.1"
LICENSEMAPPINGS["Open LDAP Public License v2.2"]="OLDAP-2.2"
LICENSEMAPPINGS["Open LDAP Public License v2.2.1"]="OLDAP-2.2.1"
LICENSEMAPPINGS["Open LDAP Public License v2.5"]="OLDAP-2.5"
LICENSEMAPPINGS["Open LDAP Public License v2.6"]="OLDAP-2.6"
LICENSEMAPPINGS["Open Public License v1.0"]="OPL-1.0"
LICENSEMAPPINGS["Open Software License 1.0"]="OSL-1.0"
LICENSEMAPPINGS["Open Software License 1.1"]="OSL-1.1"
LICENSEMAPPINGS["Open Software License 2.0"]="OSL-2.0"
LICENSEMAPPINGS["Open Software License 2.1"]="OSL-2.1"
LICENSEMAPPINGS["Open Software License 3.0"]="OSL-3.0"
LICENSEMAPPINGS["OpenJDK Assembly exception 1.0"]="OpenJDK-assembly-exception-1.0"
LICENSEMAPPINGS["OpenLDAP Public License v1.2"]="OLDAP-1.2"
LICENSEMAPPINGS["OpenLDAP Public License v2.0.1"]="OLDAP-2.0.1"
LICENSEMAPPINGS["OpenLDAP Public License v2.3"]="OLDAP-2.3"
LICENSEMAPPINGS["OpenLDAP Public License v2.4"]="OLDAP-2.4"
LICENSEMAPPINGS["OpenLDAP Public License v2.7"]="OLDAP-2.7"
LICENSEMAPPINGS["OpenLDAP Public License v2.8"]="OLDAP-2.8"
LICENSEMAPPINGS["OpenSSL License"]="OpenSSL"
LICENSEMAPPINGS["OpenVPN OpenSSL Exception"]="openvpn-openssl-exception"
LICENSEMAPPINGS["OSET Public License version 2.1"]="OSET-PL-2.1"
LICENSEMAPPINGS["PERL Artistic License"]="Artistic-1.0-Perl"
LICENSEMAPPINGS["PHP License v3.0"]="PHP-3.0"
LICENSEMAPPINGS["PHP License v3.01"]="PHP-3.01"
LICENSEMAPPINGS["Plexus Classworlds License"]="Plexus"
LICENSEMAPPINGS["psfrag License"]="psfrag"
LICENSEMAPPINGS["psutils License"]="psutils"
LICENSEMAPPINGS["Python License 2.0"]="Python-2.0"
LICENSEMAPPINGS["Q Public License 1.0"]="QPL-1.0"
LICENSEMAPPINGS["Qhull License"]="Qhull"
LICENSEMAPPINGS["Qwt exception 1.0"]="Qwt-exception-1.0"
LICENSEMAPPINGS["Rdisc License"]="Rdisc"
LICENSEMAPPINGS["RealNetworks Public Source License v1.0"]="RPSL-1.0"
LICENSEMAPPINGS["Reciprocal Public License"]="RPL-1.1"
LICENSEMAPPINGS["Reciprocal Public License 1.1"]="RPL-1.1"
LICENSEMAPPINGS["Reciprocal Public License 1.5"]="RPL-1.5"
LICENSEMAPPINGS["Red Hat eCos Public License v1.1"]="RHeCos-1.1"
LICENSEMAPPINGS["Ricoh Source Code Public License"]="RSCPL"
LICENSEMAPPINGS["RSA Message-Digest License"]="RSA-MD"
LICENSEMAPPINGS["Ruby License"]="Ruby"
LICENSEMAPPINGS["Sax Public Domain Notice"]="SAX-PD"
LICENSEMAPPINGS["Saxpath License"]="Saxpath"
LICENSEMAPPINGS["SCEA Shared Source License"]="SCEA"
LICENSEMAPPINGS["Scheme Widget Library (SWL) Software License Agreement"]="SWL"
LICENSEMAPPINGS["Secure Messaging Protocol Public License"]="SMPPL"
LICENSEMAPPINGS["Sendmail License"]="Sendmail"
LICENSEMAPPINGS["SGI Free Software License B v1.0"]="SGI-B-1.0"
LICENSEMAPPINGS["SGI Free Software License B v1.1"]="SGI-B-1.1"
LICENSEMAPPINGS["SGI Free Software License B v2.0"]="SGI-B-2.0"
LICENSEMAPPINGS["SIL Open Font License 1.0"]="OFL-1.0"
LICENSEMAPPINGS["SIL Open Font License 1.1"]="OFL-1.1"
LICENSEMAPPINGS["Simple Public License 2.0"]="SimPL-2.0"
LICENSEMAPPINGS["Sleepycat License"]="Sleepycat"
LICENSEMAPPINGS["SNIA Public License 1.1"]="SNIA"
LICENSEMAPPINGS["Spencer License 86"]="Spencer-86"
LICENSEMAPPINGS["Spencer License 94"]="Spencer-94"
LICENSEMAPPINGS["Spencer License 99"]="Spencer-99"
LICENSEMAPPINGS["Standard ML of New Jersey License"]="SMLNJ"
LICENSEMAPPINGS["SugarCRM Public License v1.1.3"]="SugarCRM-1.1.3"
LICENSEMAPPINGS["Sun Industry Standards Source License (SISSL) v1.1"]="SISSL"
LICENSEMAPPINGS["Sun Industry Standards Source License v1.2"]="SISSL-1.2"
LICENSEMAPPINGS["Sun Public License v1.0"]="SPL-1.0"
LICENSEMAPPINGS["Sybase Open Watcom Public License 1.0"]="Watcom-1.0"
LICENSEMAPPINGS["Tcl License"]="TCL"
LICENSEMAPPINGS["TCP Wrappers License"]="TCP-wrappers"
LICENSEMAPPINGS["The Beerware License"]="Beerware"
LICENSEMAPPINGS["The Code Project Open License (CPOL) 1.02"]="CPOL-1.02"
LICENSEMAPPINGS["The Curl License"]="curl"
LICENSEMAPPINGS["The JSON License"]="JSON"
LICENSEMAPPINGS["The PostgreSQL License"]="PostgreSQL"
LICENSEMAPPINGS["The Unlicense"]="Unlicense"
LICENSEMAPPINGS["TMate License"]="TMate"
LICENSEMAPPINGS["TORQUE v2.5+ Software License v1.1"]="TORQUE-1.1"
LICENSEMAPPINGS["Trusster Open Source License"]="TOSL"
LICENSEMAPPINGS["U-Boot exception 2.0"]="u-boot-exception-2.0"
LICENSEMAPPINGS["Unicode License Agreement - Data Files and Software (2015)"]="Unicode-DFS-2015"
LICENSEMAPPINGS["Unicode License Agreement - Data Files and Software (2016)"]="Unicode-DFS-2016"
LICENSEMAPPINGS["Unicode Terms of Use"]="Unicode-TOU"
LICENSEMAPPINGS["Universal Permissive License v1.0"]="UPL-1.0"
LICENSEMAPPINGS["University of Illinois/NCSA Open Source License"]="NCSA"
LICENSEMAPPINGS["US Naval Research Laboratory (NRL) v1.1"]="NRL"
LICENSEMAPPINGS["Vim License"]="Vim"
LICENSEMAPPINGS["VOSTROM Public License for | |
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel) \
and not isinstance(estimator.kernel, GPKernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if hasattr(score, 'item'):
try:
# e.g. unwrap memmapped scalars
score = score.item()
except ValueError:
# non-scalar?
pass
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
estimator.fit(X_train, y_train)
avg_score.append(scorer(estimator, X_test, y_test))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return safe_indexing(y, ind)
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.check_cv` instead.
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if classifier is True and ``y`` is binary or
multiclass, :class:`StratifiedKFold` is used. In all other cases,
:class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean, optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv : a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
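# Illustrative usage sketch (assumption, kept as comments; `X` and `y` are
# hypothetical data arrays): check_cv turns an integer or None into a concrete
# generator -- StratifiedKFold for classifiers with binary/multiclass targets,
# plain KFold otherwise.
#
#     cv = check_cv(5, X, y, classifier=True)   # StratifiedKFold(y, 5) here
#     for train_idx, test_idx in cv:
#         ...  # fit on X[train_idx], evaluate on X[test_idx]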
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.permutation_test_score` instead.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
the same label.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
The scores obtained for each permutation.
pvalue : float
The p-value, which approximates the probability that the score would
be obtained by chance. This is calculated as:
`(C + 1) / (n_permutations + 1)`
Where C is the number of permutations whose score >= the true score.
The best possible p-value is 1/(n_permutations + 1), the worst is 1.0.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
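# Illustrative usage sketch (assumption, kept as comments; the estimator and
# the `X`, `y` arrays are hypothetical):
#
#     from sklearn.svm import SVC
#     score, perm_scores, pvalue = permutation_test_score(
#         SVC(kernel='linear'), X, y, cv=5, n_permutations=100,
#         scoring='accuracy')
#
# A p-value close to its minimum of 1 / (n_permutations + 1) indicates the
# observed score is unlikely to arise by chance.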
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.train_test_split` instead.
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
data in a one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split.
# pyGSTi-Developers/pyGSTi
"""
Classes for constructing confidence regions
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import collections as _collections
import copy as _copy
import itertools as _itertools
import warnings as _warnings
import numpy as _np
import scipy.stats as _stats
from pygsti import optimize as _opt
from pygsti import tools as _tools
from pygsti.models.explicitcalc import P_RANK_TOL
from pygsti.baseobjs.nicelyserializable import NicelySerializable as _NicelySerializable
from pygsti.baseobjs.verbosityprinter import VerbosityPrinter as _VerbosityPrinter
from pygsti.circuits.circuitlist import CircuitList as _CircuitList
from pygsti.objectivefns.objectivefns import PoissonPicDeltaLogLFunction as _PoissonPicDeltaLogLFunction
from pygsti.objectivefns.objectivefns import Chi2Function as _Chi2Function
from pygsti.objectivefns.objectivefns import FreqWeightedChi2Function as _FreqWeightedChi2Function
# NON-MARKOVIAN ERROR BARS
#Connection with Robin's notes:
#
# Robin's notes: pg 21: want to set radius delta'(alpha, r2)
# via: lambda(G) = lambda(G_mle) + delta'
#
# Connecting with pyGSTi Hessian (H) calculations:
# lambda(G) = 2(maxLogL - logL(G)) ~ chi2_k (as defined in notes)
# lambda(G_mle) = 2(maxLogL - logL(G_mle))
#
# expand logL around max:
# logL(G_mle + dx) ~= logL(G_mle) - 1/2 dx*H*dx (no first order term)
#
# Thus, delta'
# delta' = lambda(G) - lambda(G_mle) = -2(logL(G) - logL(G_mle))
# = dx*H*dx ==> delta' is just like C1 or Ck scaling factors
# used for computing normal confidence regions
# (recall delta' is computed as the alpha-th quantile of a
# non-central chi^2_{K',r2} where K' = #of model params and
# r2 = lambda(G_mle) - (K-K'), where K = #max-model params (~#circuits)
# is the difference between the expected (mean) lambda (=K-K') and what
# we actually observe (=lambda(G_mle)).
#
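# Illustrative sketch (assumption -- this helper is not part of the original
# module and is not used elsewhere; it only restates the notes above in code,
# reusing the module's `_stats` import): delta' is the alpha-th quantile of a
# non-central chi^2 with K' degrees of freedom and non-centrality parameter r2.
def _illustrative_nonmarkovian_delta(alpha, n_model_params, non_mark_radius_sq):
    """Hypothetical helper: the delta' scaling factor described above."""
    return _stats.ncx2.ppf(alpha, n_model_params, non_mark_radius_sq)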
class ConfidenceRegionFactory(_NicelySerializable):
"""
An object which is capable of generating confidence intervals/regions.
Often, it does so by holding the Hessian of a fit function with
respect to a `Model`'s parameters and related projections of it onto the
non-gauge space.
Alternative (non-Hessian-based) means of computing confidence intervals
are also available, such as by using so-called "linear response error bars".
Parameters
----------
parent : Estimate
the parent estimate object, needed to resolve model and gate
string list labels.
model_lbl : str
The key into the parent `Estimate`'s `.models` dictionary that
gives the `Model` about which confidence regions will be
constructed.
circuit_list_lbl : str
The key into the parent `Results`'s `.circuit_lists` dictionary
that specifies which circuits should be or were included
when computing fit functions (the log-likelihood or chi2).
hessian : numpy array, optional
A pre-computed num_params x num_params Hessian matrix, where num_params is
the number of dimensions of model space, i.e. model.num_params.
non_mark_radius_sq : float, optional
The non-Markovian radius associated with the goodness of fit found
at the point where `hessian` was computed. This must be specified
whenever `hessian` is, and should be left as `None` when `hessian`
is not specified.
"""
def __init__(self, parent, model_lbl, circuit_list_lbl,
hessian=None, non_mark_radius_sq=None):
"""
Initializes a new ConfidenceRegionFactory.
Parameters
----------
parent : Estimate
the parent estimate object, needed to resolve model and gate
string list labels.
model_lbl : str
The key into the parent `Estimate`'s `.models` dictionary that
gives the `Model` about which confidence regions will be
constructed.
circuit_list_lbl : str
The key into the parent `Results`'s `.circuit_lists` dictionary
that specifies which circuits should be or were included
when computing fit functions (the log-likelihood or chi2).
hessian : numpy array, optional
A pre-computed num_params x num_params Hessian matrix, where num_params is
the number of dimensions of model space, i.e. model.num_params.
non_mark_radius_sq : float, optional
The non-Markovian radius associated with the goodness of fit found
at the point where `hessian` was computed. This must be specified
whenever `hessian` is, and should be left as `None` when `hessian`
is not specified.
"""
# May be specified (together) when the hessian has already been computed
assert(hessian is None or non_mark_radius_sq is not None), \
"'non_mark_radius_sq' must be non-None when 'hessian' is specified"
self.hessian = hessian
self.jacobian = None # just for sanity checking hessian projection
self.nonMarkRadiusSq = non_mark_radius_sq
self.hessian_projection_parameters = _collections.OrderedDict()
self.inv_hessian_projections = _collections.OrderedDict()
self.linresponse_gstfit_params = None
self.nNonGaugeParams = self.nGaugeParams = None
self.model_lbl = model_lbl
self.circuit_list_lbl = circuit_list_lbl
self.set_parent(parent)
def __getstate__(self):
# don't pickle parent (will create circular reference)
to_pickle = self.__dict__.copy()
del to_pickle['parent']
# *don't* pickle any Comm objects
if self.linresponse_gstfit_params and "resource_alloc" in self.linresponse_gstfit_params:
to_pickle['linresponse_gstfit_params'] = self.linresponse_gstfit_params.copy()
del to_pickle['linresponse_gstfit_params']['resource_alloc'] # one *cannot* pickle Comm objects
return to_pickle
def __setstate__(self, state_dict):
self.__dict__.update(state_dict)
self.parent = None # initialize to None upon unpickling
def _to_nice_serialization(self):
state = super()._to_nice_serialization()
state.update({'model_label': self.model_lbl,
'circuit_list_label': self.circuit_list_lbl,
'nonmarkovian_radius_squared': self.nonMarkRadiusSq,
'hessian_matrix': self._encodemx(self.hessian) if (self.hessian is not None) else None,
'hessian_projection_parameters': {k: v for k, v in self.hessian_projection_parameters.items()},
'inverse_hessian_projections': {k: self._encodemx(v)
for k, v in self.inv_hessian_projections.items()},
'num_nongauge_params': int(self.nNonGaugeParams) if (self.nNonGaugeParams is not None) else None,
'num_gauge_params': int(self.nGaugeParams) if (self.nGaugeParams is not None) else None,
#Note: need int(.) casts above because int64 is *not* JSON serializable (?)
#Note: we don't currently serialize self.linresponse_gstfit_params (!)
})
return state
@classmethod
def _from_nice_serialization(cls, state):
ret = cls(None, state['model_label'], state['circuit_list_label'],
cls._decodemx(state['hessian_matrix']) if (state['hessian_matrix'] is not None) else None,
state['nonmarkovian_radius_squared'])
if 'hessian_projection_parameters' in state: # for backward compatibility
for projection_lbl, params in state['hessian_projection_parameters'].items():
ret.hessian_projection_parameters[projection_lbl] = params # (param dict is entirely JSON-able)
ret.inv_hessian_projections[projection_lbl] = cls._decodemx(
state['inverse_hessian_projections'][projection_lbl])
ret.nNonGaugeParams = state['num_nongauge_params']
ret.nGaugeParams = state['num_gauge_params']
return ret
def set_parent(self, parent):
"""
Sets the parent Estimate object of this ConfidenceRegionFactory.
This function is usually only needed internally to re-link a
ConfidenceRegionFactory with its parent after being un-serialized
from disk.
Parameters
----------
parent : Estimate
The parent of this object.
Returns
-------
None
"""
self.parent = parent
@property
def has_hessian(self):
"""
Returns whether or not the Hessian has already been computed.
When True, :func:`project_hessian` can be used to project the
Hessian for use in creating confidence intervals. When False,
either :func:`compute_hessian` can be called to compute the
Hessian or slower methods must be used to estimate the necessary
portion of the Hessian. The result of this function is often used
to decide whether or not to proceed with an error-bar computation.
Returns
-------
bool
"""
#return bool(self.invRegionQuadcForm is not None)
return bool(self.hessian is not None)
def can_construct_views(self):
"""
Checks whether this factory has enough information to construct 'views' of itself.
`ConfidenceRegionFactoryView` view objects are created using the
:meth:`view` method, which can in turn be used to construct
confidence intervals.
Returns
-------
bool
"""
try:
self.view(95) # will raise assertion errors
return True
except:
return False
@property
def model(self):
"""
Retrieve the associated model.
Returns
-------
Model
the model marking the center location of this confidence region.
"""
assert(self.parent is not None) # Estimate
return self.parent.models[self.model_lbl]
def compute_hessian(self, comm=None, mem_limit=None, approximate=False):
"""
Computes the Hessian for this factory.
Parameters
----------
comm : mpi4py.MPI.Comm, optional
When not None, an MPI communicator for distributing the computation
across multiple processors.
mem_limit : int, optional
A rough memory limit in bytes which restricts the amount of intermediate
values that are computed and stored.
approximate : bool, optional
Whether to compute the true Hessian or just an approximation of it.
See :func:`logl_approximate_hessian`. Setting to True can
significantly reduce the run time.
Returns
-------
numpy.ndarray
The Hessian matrix (also stored internally)
"""
assert(self.parent is not None) # Estimate
assert(self.parent.parent is not None) # Results
model = self.parent.models[self.model_lbl]
circuit_list = self.parent.parent.circuit_lists[self.circuit_list_lbl]
dataset = self.parent.parent.dataset
#extract any parameters we can get from the Estimate
objfn_builder = self.parent.final_objfn_builder
regularization = objfn_builder.regularization if (objfn_builder.regularization is not None) else {}
penalties = objfn_builder.penalties if (objfn_builder.penalties is not None) else {}
if issubclass(objfn_builder.cls_to_build, _PoissonPicDeltaLogLFunction):
obj = 'logl'
useFreqWt = False
elif issubclass(objfn_builder.cls_to_build, (_Chi2Function, _FreqWeightedChi2Function)):
obj = 'chi2'
useFreqWt = issubclass(objfn_builder.cls_to_build, _FreqWeightedChi2Function)
else:
raise ValueError("Unsupported objective function class: " + objfn_builder.cls_to_build.__name__)
minProbClip = regularization.get('min_prob_clip', 1e-4)
minProbClipForWeighting = regularization.get('min_prob_clip_for_weighting', 1e-4)
probClipInterval = penalties.get('prob_clip_interval', (-1e6, 1e6))
radius = regularization.get('radius', 1e-4)
cptp_penalty_factor = penalties.get('cptp_penalty_factor', 0)
spam_penalty_factor = penalties.get('spam_penalty_factor', 0)
aliases = circuit_list.op_label_aliases if isinstance(circuit_list, _CircuitList) else None
vb = 3 if mem_limit else 0  # only show details of the Hessian computation when a memory limit is given
# main.py
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from google.appengine.api import memcache
from string import letters
from google.appengine.ext import db
import webapp2
import jinja2
import logging
import datetime
import hmac
import random
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir),
autoescape = True)
SECRET = 'somethingsecret'
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def blog_key(name = 'default'):
return db.Key.from_path('blogs', name)
def make_salt():
return ''.join(random.choice(letters) for i in xrange(5))
def make_pw_hash(name, pw, salt = None):
if not salt:
salt = make_salt()
h = hmac.new(SECRET, name + pw + salt).hexdigest()
return "%s|%s" % (h, salt)
def valid_cookie(cookie):
    if cookie:
        user_data = cookie.split('|', 1)
        user = db.GqlQuery("SELECT * FROM User WHERE name = '%s' AND pw_hash ='%s'" % (user_data[0], user_data[1])).get()
        # A cookie is only valid if the name/hash pair matches a stored User.
        return user is not None
    else:
        return False
def render_str(template, **params):
t = jinja_env.get_template(template)
return t.render(params)
def validPwHash(user, pw):
    hash = memcache.get(key = user)
    if hash is None:
        user_data = db.GqlQuery("SELECT * FROM User WHERE name = '%s'" % (user), read_policy=db.STRONG_CONSISTENCY).get()
        # Question : Correct syntax regarding the "read_policy=db.STRONG_CONSISTENCY" part ?
        if user_data:
            hash = user_data.pw_hash
            salt = user_data.pw_hash.split('|')[1]
            if hash == make_pw_hash(user, pw, salt):
                if not memcache.add(key = user, value = hash):
                    logging.debug('Memcache add failed.')
                return hash
    else:
        salt = hash.split('|')[1]
        if hash == make_pw_hash(user, pw, salt):
            return hash
    return False
def loggedInUser(cookie):
user = None
logging.info('-------------------- cookie : %s' % cookie)
# cookie = self.request.cookies.get('user')
if cookie:
user_data = cookie.split('|', 1)
hash = memcache.get(user_data[0])
if hash is None:
user = db.GqlQuery("SELECT * FROM User WHERE name = '%s' AND pw_hash ='%s'" % (user_data[0], user_data[1]), read_policy=db.STRONG_CONSISTENCY).get()
if user:
hash = user.pw_hash
if not memcache.add(key = user.name, value = user.pw_hash):
logging.error('Memcache add failed.')
if user_data[1] == hash:
return user_data[0]
else:
return None
class User(db.Model):
name = db.StringProperty(required = True)
pw_hash = db.StringProperty(required = True)
email = db.EmailProperty(required = False)
created = db.DateTimeProperty(auto_now_add = True)
class Item(db.Model):
title = db.StringProperty(required = True)
text = db.TextProperty(required = True)
created = db.DateTimeProperty(auto_now_add = True)
creator = db.StringProperty(required = True)
def render(self):
self._render_text = self.text.replace('\n', '<br>')
return render_str('post.html', p = self)
class Lajk(db.Model):
comment = db.ReferenceProperty(Item)
user = db.ReferenceProperty(User)
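# Note (illustrative): each Lajk ("like") appears to be stored with the liked
# Item as its datastore parent (see Blog.post below), which is what lets
# Blog.get count likes per post with an ancestor query:
#   Lajk.all().ancestor(item.key()).count()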
# pw_hash = make_pw_hash('me', 'admin')
class Handler(webapp2.RequestHandler):
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params):
t = jinja_env.get_template(template)
return t.render(params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
def loggedInUser(self):
cookie = self.request.cookies.get('user')
logging.info('loggedInUser() cookie = %s' % cookie)
if cookie:
user_data = cookie.split('|', 1)
logger.info('user_data: %s , %s' % (user_data[0], user_data[1]))
user = db.GqlQuery("SELECT * FROM User WHERE name = '%s' AND pw_hash ='%s'" % (user_data[0], user_data[1]), read_policy=db.STRONG_CONSISTENCY).get()
if user:
logger.info('valid cookie() db.GqlQuery : user =%s' % (user.name))
return user_data[0]
# If no cookie or no match in user DB then return False
else:
logging.info('FAILED! loggedInUser(): user =%s' % user)
return False
class MainPage(Handler):
def get(self):
# items = self.request.get_all('food')
items = db.GqlQuery("SELECT * FROM Item ORDER BY created DESC")
self.render('signup.html', items = items)
USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
def valid_username(username):
return username and USER_RE.match(username)
PASS_RE = re.compile(r"^.{3,20}$")
def valid_password(password):
return password and PASS_RE.match(password)
EMAIL_RE = re.compile(r'^[\S]+@[\S]+\.[\S]+$')
def valid_email(email):
return not email or EMAIL_RE.match(email)
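# Examples of the validation rules above (illustrative only):
#   valid_username('bob_99') -> match object (truthy); valid_username('x') -> None (too short)
#   valid_password('abc')    -> match object (truthy); valid_password('ab') -> None
#   valid_email('')          -> True (email is optional); valid_email('a@b') -> None (no dot)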
class MakeBlogPost(Handler):
def render_front(self, title = "", text = ""):
self.render('new-post.html', title = title, text = text)
def get(self):
user = self.loggedInUser()
if user:
self.render('new-post.html', user = user)
else:
self.redirect('/blog')
    def post(self):
        user = self.loggedInUser()
        if not user:
            # Only logged-in users may create posts.
            self.redirect('/blog')
            return
post_title = self.request.get('title')
post_text = self.request.get('text')
params = dict(title = post_title, text = post_text)
if post_title and post_text:
i = Item(parent = blog_key(), title = post_title, text = post_text, creator = user)
i.put()
self.redirect('/blog/%s' % str(i.key().id()))
else:
self.render_front(**params)
class EditPost(Handler):
def render_front(self, title = "", text = ""):
self.render('edit-post.html', title = title, text = text)
    def post(self):
        user = self.loggedInUser()
        logging.info('EditPost()')
        if not user:
            # Only logged-in users may edit posts.
            self.redirect('/blog')
            return
        post_title = self.request.get('title')
        post_text = self.request.get('text')
        # Assumes the edit form posts the id of the post being edited as 'post_id'.
        post_id = self.request.get('post_id')
        params = dict(title = post_title, text = post_text)
        if post_title and post_text:
            key = db.Key.from_path('Item', int(post_id), parent=blog_key())
post = db.get(key)
post.title = post_title
post.text = post_text
post.put()
self.redirect('/blog/%s' % str(post.key().id()))
else:
self.render_front(**params)
class PostPage(Handler):
def get(self, post_id):
key = db.Key.from_path('Item', int(post_id), parent=blog_key())
post = db.get(key)
if not post:
self.error(404)
return
self.render("permalink.html", post = post)
class Signup(Handler):
def get(self):
# user_cookie = self.request.cookies.get('user')
# logger.debug("nonvalid user_cookie=%s" % user_cookie)
if self.loggedInUser():
self.redirect('/blog')
else:
self.render('signup.html')
def post(self):
# self.response.headers['Content-Type'] = 'text/plain'
have_error = False
username = self.request.get('username')
password = self.request.get('password')
verify = self.request.get('verify')
email = self.request.get('email')
params = dict(username = username, email = email)
if not valid_username(username):
params['error_username'] = 'Not valid username.'
have_error = True
if not valid_password(password):
params['error_password'] = 'Not valid password.'
have_error = True
elif password != verify:
params['error_verify'] = 'Your passwords did not match.'
have_error = True
if not valid_email(email):
params['error_email'] = 'That is not a valid email.'
have_error = True
if have_error:
self.render('signup.html', **params)
else:
isUserInDB = db.GqlQuery("SELECT * FROM User WHERE name = '%s'" % username).get()
if isUserInDB:
params['error_username'] = 'Username not available.'
self.render('signup.html', **params)
else:
user_params = {}
user_params['name'] = username
user_params['pw_hash'] = make_pw_hash(username, password)
if email:
user_params['email'] = email
u = User(**user_params)
u.put()
self.response.headers.add_header('Set-Cookie', 'user=%s|%s' % (str(username), user_params['pw_hash']))
memcache.add( key = username, value = user_params['pw_hash'])
self.redirect('/welcome')
class Login(Handler):
def get(self):
self.render('login.html')
def post(self):
have_error = False
username = self.request.get('username')
password = self.request.get('password')
params = dict(username = username)
if not valid_username(username):
params['error_username'] = 'Not valid username.'
have_error = True
# logging.info('login have_error username =%s' % username)
if not valid_password(password):
params['error_password'] = 'Not valid password.'
have_error = True
# logging.info('login have_error password =%s' % password)
logging.info('login have_error =%s' % have_error)
if not have_error:
pw_hash = validPwHash(username, password)
logging.info('pw_hash=%s' % (pw_hash))
if pw_hash:
# pw_hash = make_pw_hash(username, password)
self.response.headers.add_header('Set-Cookie', 'user=%s|%s' % (str(username), (str(pw_hash))))
memcache.add(key='current_user', value=username, time=3600)
self.redirect('/blog')
else:
params['error_username'] = 'Not valid username and/or password.'
self.render('/login.html', **params)
class Welcome(Handler):
def get(self):
self.render('welcome.html')
class Logout(Handler):
def get(self):
self.response.headers.add_header('Set-Cookie', 'user=')
self.redirect('/blog')
class Blog(Handler):
def get(self):
current_user = None
params = dict()
cookie = self.request.cookies.get('user')
if cookie:
current_user = loggedInUser(cookie) # None if no user is logged in.
items = db.GqlQuery("SELECT * FROM Item ORDER BY created DESC")
userEntity = db.GqlQuery("SELECT * FROM User WHERE name='%s'" % current_user).get()
commentDataArray = []
i = 0
for item in items:
commentData = {}
commentData['number'] = i
commentData['item'] = item
commentData['timeMessage'] = item.created.strftime("%A %d. %B %Y")
# logging.info('item.creator = %s' % item.creator)
# logging.info('item.text = %s' % item.text)
lajkByCurrentUser = None
commentData['liked'] = False
numberOfLikes = Lajk.all().ancestor(item.key()).count()
commentData['numberOfLikes'] = numberOfLikes
if current_user:
filteredLikes = Lajk.all().ancestor(item.key()).filter("user =", userEntity)
itemLiked = Lajk.all().ancestor(item.key()).filter("user =", userEntity).get()
# logging.info('number of likes = %s' % allLikes.count())
# logging.info('number of Flikes = %s' % filteredLikes.count())
if itemLiked is not None:
lajkByCurrentUser = True
logging.info("liked by current user = %s" % itemLiked.user.name)
else:
logging.info('NOT liked!')
if lajkByCurrentUser:
commentData['liked'] = True
# logging.info('liked by currnet user, set to True')
# logging.info('likedByCurrentUser = %s' % likedByCurrentUser.comment)
commentDataArray.append(commentData)
i += 1
# for commentData in commentDataArray:
# logging.info('commentData.comment.text = %s' % commentData['item'].text)
# if commentData['liked']:
# logging.info('comment is liked')
# params = dict(items = items)
# params = dict(comments = commentDataArray)
params['comments'] = commentDataArray
timeMessages = []
params['timeMessages'] = timeMessages
# for item in items:
# logging.info(item.ID)
if current_user is not None:
params['user'] = current_user
logging.info('current user is : %s' % current_user)
self.render('blog.html', **params)
def post(self):
user = self.loggedInUser()
if user:
# self.render('blog.html', user = user)
edit_postID = self.request.get('edit_postID')
editButton = self.request.get('editButton')
like_postID = self.request.get('like_postID')
likeButton = self.request.get('likeButton')
            if likeButton:
                # The like form posts the liked post's key as like_postID.
                comment = db.get(like_postID)
userEntity = db.GqlQuery("SELECT * FROM User WHERE name = '%s'" % user, read_policy=db.STRONG_CONSISTENCY).get()
if likeButton == 'liked was pressed':
like = Lajk(parent | |
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from hypothesis import assume, given
from hypothesis.extra.numpy import from_dtype
from hypothesis.strategies import booleans, floats, integers, just, lists, sampled_from
from hypothesis_gufunc.gufunc import gufunc_args as gufunc
from scipy.interpolate import interp1d
from sklearn.preprocessing import LabelBinarizer
import bayesmark.space as sp
from bayesmark.np_util import linear_rescale
from bayesmark.space import CAT_DTYPE, CAT_KIND, CAT_NATIVE_DTYPE
from hypothesis_util import broadcast_tester, close_enough, gufunc_floats
from util import space_configs
INT_MIN = np.iinfo("i").min
INT_MAX = np.iinfo("i").max
WARPS = ("logit", "linear", "bilog", "log")
ENCODER_DTYPES = ("bool", "int", "float")
def encoder_gen(args):
X, labels, assume_sorted, dtype, assume_valid = args
if assume_sorted:
labels = np.sort(labels)
X = labels[X % len(labels)]
dtype = dtype.item() # np.array does not like np.array(dtype)
return X, labels, assume_sorted, dtype, assume_valid
def decoder_gen(args):
Y, labels, assume_sorted, dtype, assume_valid = args
if assume_sorted:
labels = np.sort(labels)
dtype = dtype.item()
return Y, labels, assume_sorted, dtype, assume_valid
def decoder_gen_broadcast(args):
Y, labels, assume_sorted = args
if assume_sorted:
labels = np.sort(labels)
return Y, labels, assume_sorted
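# Minimal concrete sketch of the round trip exercised by the property-based
# tests below (illustrative; assumes sp.encode/sp.decode behave for 1-d inputs
# as the gufunc signatures in these tests suggest). Not named test_*, so pytest
# does not collect it.
def _example_encode_decode_roundtrip():
    labels = np.asarray(["a", "b", "c"], dtype=CAT_DTYPE)
    X = labels[[1]]  # a single category, "b"
    Y = sp.encode(X, labels, assume_sorted=True, dtype="int", assume_valid=True)
    X2 = sp.decode(Y, labels, assume_sorted=True)
    assert close_enough(X, X2)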
@given(
gufunc(
"(),(n),(),(),()->(n)",
dtype=[np.int_, CAT_DTYPE, np.bool_, str, np.bool_],
elements=[
integers(0, INT_MAX),
from_dtype(np.dtype(CAT_DTYPE)),
booleans(),
sampled_from(ENCODER_DTYPES),
booleans(),
],
unique=[False, True, False, False, False],
min_side={"n": 1},
).map(encoder_gen)
)
def test_encode_decode(args):
X, labels, assume_sorted, dtype, assume_valid = args
Y = sp.encode(X, labels, assume_sorted=assume_sorted, dtype=dtype, assume_valid=assume_valid)
if assume_sorted: # otherwise labels will be re-arranged
(idx,), = np.where(Y > 0)
assert np.asarray(labels[idx]) == X
assert Y.dtype == dtype
X2 = sp.decode(Y, labels, assume_sorted=assume_sorted)
assert close_enough(X, X2)
@given(
gufunc(
"(m),(n),(),(),()->(n)",
dtype=[np.int_, CAT_DTYPE, np.bool_, str, np.bool_],
elements=[
integers(0, INT_MAX),
from_dtype(np.dtype(CAT_DTYPE)),
booleans(),
sampled_from(ENCODER_DTYPES),
booleans(),
],
unique=[False, True, False, False, False],
min_side={"m": 1, "n": 3},
).map(encoder_gen)
)
def test_encoder_to_sklearn(args):
# sklearn cannot handle this correctly unless n >= 3
X, labels, assume_sorted, dtype, assume_valid = args
Y = sp.encode(X, labels, assume_sorted=assume_sorted, dtype=dtype, assume_valid=assume_valid)
enc = LabelBinarizer()
enc.fit(labels)
Y2 = enc.transform(X)
assert close_enough(Y, Y2.astype(dtype))
@given(
gufunc(
"(m,n),(n),(),(),()->(n)",
dtype=[np.float_, CAT_DTYPE, np.bool_, str, np.bool_],
elements=[floats(), from_dtype(np.dtype(CAT_DTYPE)), booleans(), sampled_from(ENCODER_DTYPES), booleans()],
unique=[False, True, False, False, False],
min_side={"n": 1},
).map(decoder_gen)
)
def test_decode_encode(args):
Y, labels, assume_sorted, dtype, assume_valid = args
assert Y.ndim >= 1 and Y.shape[-1] == len(labels)
X = sp.decode(Y, labels, assume_sorted=assume_sorted)
Y2 = sp.encode(X, labels, assume_sorted=assume_sorted, dtype=dtype, assume_valid=assume_valid)
# The encoding is defined as the argmax
assert np.all(Y.argmax(axis=1) == Y2.argmax(axis=1))
assert np.all(np.sum(Y2 != 0, axis=1) == 1)
assert np.all(np.sum(Y2 == 1, axis=1) == 1)
@given(
gufunc(
"(m,n),(n),(),(),()->(n)",
dtype=[np.float_, CAT_DTYPE, np.bool_, str, np.bool_],
elements=[floats(), from_dtype(np.dtype(CAT_DTYPE)), booleans(), sampled_from(ENCODER_DTYPES), booleans()],
unique=[False, True, False, False, False],
min_side={"m": 1, "n": 3},
).map(decoder_gen)
)
def test_decode_to_sklearn(args):
Y, labels, assume_sorted, dtype, assume_valid = args
assert Y.ndim >= 1 and Y.shape[-1] == len(labels)
X = sp.decode(Y, labels, assume_sorted=assume_sorted)
enc = LabelBinarizer()
enc.fit(labels)
X2 = enc.inverse_transform(Y)
assert X.dtype.kind == CAT_KIND
assert close_enough(X, X2.astype(X.dtype))
def test_encode_broadcast_bool():
broadcast_tester(
sp.encode,
"(),(n),(),(),()->(n)",
otype=bool,
excluded=(1, 2, 3, 4),
dtype=[np.int_, CAT_DTYPE, np.bool_, object, np.bool_],
elements=[integers(0, INT_MAX), from_dtype(np.dtype(CAT_DTYPE)), booleans(), just("bool"), booleans()],
unique=[False, True, False, False, False],
min_side={"n": 1},
map_=encoder_gen,
)
def test_encode_broadcast_int():
broadcast_tester(
sp.encode,
"(),(n),(),(),()->(n)",
otype=int,
excluded=(1, 2, 3, 4),
dtype=[np.int_, CAT_DTYPE, np.bool_, object, np.bool_],
elements=[integers(0, INT_MAX), from_dtype(np.dtype(CAT_DTYPE)), booleans(), just("int"), booleans()],
unique=[False, True, False, False, False],
min_side={"n": 1},
map_=encoder_gen,
)
def test_encode_broadcast_float():
broadcast_tester(
sp.encode,
"(),(n),(),(),()->(n)",
otype=float,
excluded=(1, 2, 3, 4),
dtype=[np.int_, CAT_DTYPE, np.bool_, object, np.bool_],
elements=[integers(0, INT_MAX), from_dtype(np.dtype(CAT_DTYPE)), booleans(), just("float"), booleans()],
unique=[False, True, False, False, False],
min_side={"n": 1},
map_=encoder_gen,
)
def test_decode_broadcast_bool():
broadcast_tester(
sp.decode,
"(m,n),(n),()->(m)",
otype=CAT_DTYPE,
excluded=(1, 2),
dtype=[np.bool_, CAT_DTYPE, np.bool_],
elements=[booleans(), from_dtype(np.dtype(CAT_DTYPE)), booleans()],
unique=[False, True, False],
min_side={"n": 1},
map_=decoder_gen_broadcast,
)
def test_decode_broadcast_int():
broadcast_tester(
sp.decode,
"(m,n),(n),()->(m)",
otype=CAT_DTYPE,
excluded=(1, 2),
dtype=[np.int_, CAT_DTYPE, np.bool_],
elements=[integers(INT_MIN, INT_MAX), from_dtype(np.dtype(CAT_DTYPE)), booleans()],
unique=[False, True, False],
min_side={"n": 1},
map_=decoder_gen_broadcast,
)
def test_decode_broadcast_float():
broadcast_tester(
sp.decode,
"(m,n),(n),()->(m)",
otype=CAT_DTYPE,
excluded=(1, 2),
dtype=[np.float_, CAT_DTYPE, np.bool_],
elements=[floats(), from_dtype(np.dtype(CAT_DTYPE)), booleans()],
unique=[False, True, False],
min_side={"n": 1},
map_=decoder_gen_broadcast,
)
@given(gufunc("()->()", dtype=np.float_, elements=floats()))
def test_bilog_props(args):
x, = args
y = sp.bilog(x)
assert sp.bilog(0) == 0 # This could be its own test
assert close_enough(y, -sp.bilog(-x), equal_nan=True)
assert np.isfinite(y) == np.isfinite(x)
@given(gufunc_floats("(2)->(2)", allow_infinity=False, allow_nan=False))
def test_bilog_monotonic(args):
x, = args
x1, x2 = sorted(np.abs(x))
assert sp.bilog(x1) < sp.bilog((1 + 1e-6) * x2 + 1e-6)
@given(gufunc("()->()", dtype=np.float_, elements=floats()))
def test_bilog_biexp(args):
x, = args
assert close_enough(sp.biexp(sp.bilog(x)), x, equal_nan=True)
def test_bilog_broadcast():
broadcast_tester(sp.bilog, "()->()", otype=float)
def test_biexp_broadcast():
broadcast_tester(sp.biexp, "()->()", otype=float, min_value=-10, max_value=10)
@given(sampled_from(WARPS), gufunc_floats("(n),(m)->(n)", allow_infinity=False, allow_nan=False))
def test_real_values_warp_unwarp(warp, args):
x, values = args
if warp == "log":
values = values[values > 0]
if warp == "logit":
values = values[(0 < values) & (values < 1)]
    # We could eliminate the need for this if we split out tests for the log and
    # logit cases and specified the unique flag, but it works as is.
v = np.unique(values)
assume(len(v) >= 2)
f = interp1d(v, v, kind="nearest", fill_value="extrapolate")
x = f(x)
assert x.ndim == 1 # make sure interp1d did not mess it up
S = sp.Real(warp=warp, values=values)
y = S.warp(x)
assert y.shape == x.shape + (1,)
assert y.dtype == sp.WARPED_DTYPE
# Test bounds
lower, upper = S.get_bounds().T
assert np.all(lower <= y)
assert np.all(y <= upper)
y2 = S.validate_warped(y)
assert close_enough(y, y2)
x2 = S.unwarp(y)
assert x2.shape == x.shape
x3 = S.validate(x2)
assert close_enough(x2, x3)
assert close_enough(x, x2)
@given(sampled_from(WARPS), gufunc_floats("(n),(2)->(n)", allow_infinity=False, allow_nan=False))
def test_real_range_warp_unwarp(warp, args):
x, range_ = args
if warp == "log":
range_ = range_[range_ > 0]
if warp == "logit":
range_ = range_[(0 < range_) & (range_ < 1)]
range_ = np.sort(range_)
assume(len(range_) == 2 and range_[0] < range_[1])
x = np.clip(x, range_[0], range_[1])
S = sp.Real(warp=warp, range_=range_)
y = S.warp(x)
assert y.shape == x.shape + (1,)
assert y.dtype == sp.WARPED_DTYPE
# Test bounds
lower, upper = S.get_bounds().T
assert np.all(lower <= y)
assert np.all(y <= upper)
y2 = S.validate_warped(y)
assert close_enough(y, y2)
x2 = S.unwarp(y)
assert x2.shape == x.shape
x3 = S.validate(x2)
assert close_enough(x2, x3)
assert close_enough(x, x2)
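# Concrete instance of the warp/unwarp round trip checked above (illustrative
# sketch; keyword arguments are the same ones the tests pass to sp.Real).
def _example_real_space_roundtrip():
    S = sp.Real(warp="linear", range_=np.array([1.0, 10.0]))
    x = np.array([2.5, 7.0])
    y = S.warp(x)  # warped copy; the tests above check shape == x.shape + (1,)
    x2 = S.unwarp(y)
    assert close_enough(x, x2)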
# Note: to really stress test this we should eliminate the min and max values,
# but that requires splitting out a different test function for log and logit.
@given(sampled_from(WARPS), gufunc_floats("(n,1),(2)->(n)", min_value=-1000, max_value=1000))
def test_real_range_unwarp_warp(warp, args):
x_w, range_ = args
if warp == "log":
range_ = range_[range_ > 0]
if warp == "logit":
range_ = range_[(0 < range_) & (range_ < 1)]
range_ = np.sort(range_)
assume(len(range_) == 2 and range_[0] < range_[1])
range_warped = sp.WARP_DICT[warp](range_)
x_w = np.clip(x_w, range_warped[0], range_warped[1])
S = sp.Real(warp=warp, range_=range_)
# Test bounds
lower, upper = S.get_bounds().T
x_w = linear_rescale(x_w, lb0=-1000, ub0=1000, lb1=lower, ub1=upper)
x = S.unwarp(x_w)
assert x_w.shape == x.shape + (1,)
assert x.dtype == range_.dtype
assert x.dtype == S.dtype
x2 = S.validate(x)
assert close_enough(x, x2)
x_w2 = S.warp(x)
assert x_w2.shape == x_w.shape
x_w3 = S.validate_warped(x_w2)
assert close_enough(x_w2, x_w3)
assert close_enough(x_w, x_w2)
@given(
sampled_from(("linear", "bilog")),
gufunc("(n),(m)->(n)", dtype=np.int_, elements=integers(INT_MIN, INT_MAX), unique=[False, True], min_side={"m": 2}),
)
def test_int_values_warp_unwarp(warp, args):
x, values = args
v = np.unique(values) # Also sort
assert len(v) >= 2
f = interp1d(v, v, kind="nearest", fill_value="extrapolate")
x = f(x).astype(values.dtype)
assert x.ndim == 1 # make sure interp1d did not mess it up
S = sp.Integer(warp=warp, values=values)
y = S.warp(x)
assert y.shape == x.shape + (1,)
assert y.dtype == sp.WARPED_DTYPE
# Test bounds
lower, upper = S.get_bounds().T
assert np.all(lower <= y)
assert np.all(y <= upper)
y2 = S.validate_warped(y)
assert close_enough(y, y2)
x2 = S.unwarp(y)
assert x2.shape == x.shape
x3 = S.validate(x2)
assert close_enough(x2, x3)
assert close_enough(x, x2)
@given(gufunc("(n),(m)->(n)", dtype=np.int_, elements=integers(1, INT_MAX), unique=[False, True], min_side={"m": 2}))
def test_log_int_values_warp_unwarp(args):
x, values = args
warp = "log"
v = np.unique(values) # Also sort
assert len(v) >= 2
f = interp1d(v, v, kind="nearest", fill_value="extrapolate")
x = f(x).astype(values.dtype)
assert x.ndim == 1 # make sure interp1d did not mess it up
S = sp.Integer(warp=warp, values=values)
y = S.warp(x)
assert y.shape == x.shape + (1,)
assert y.dtype == sp.WARPED_DTYPE
# Test bounds
lower, upper = S.get_bounds().T
assert np.all(lower <= y)
assert np.all(y <= upper)
y2 = S.validate_warped(y)
assert close_enough(y, y2)
x2 = S.unwarp(y)
assert x2.shape == x.shape
x3 = S.validate(x2)
assert close_enough(x2, x3)
assert close_enough(x, x2)
@given(sampled_from(("linear", "bilog", "log")), gufunc("(n),(2)->(n)", dtype=np.int_, elements=integers(-1000, 1000)))
def test_int_range_warp_unwarp(warp, args):
"""Warning: this explicitly ignores issues with | |
the library doesn't exist,
the corresponding tuple element will be None. For example, this
dictionary signifies that 'requirements.txt' requires flask with
version 1.0.1 while the 'third_party/python_libs' directory contains
flask 1.1.1:
{
flask: ('1.0.1', '1.1.1')
}
"""
# Handling 5 or more mismatches requires 5 or more individual `pip install`
# commands, which is slower than just reinstalling all of the libraries
# using `pip install -r requirements.txt`.
if len(mismatches) >= 5:
if os.path.isdir(common.THIRD_PARTY_PYTHON_LIBS_DIR):
shutil.rmtree(common.THIRD_PARTY_PYTHON_LIBS_DIR)
_reinstall_all_dependencies()
return
    # The library is installed in the directory but is not listed in
    # requirements. We don't have functionality to remove a library cleanly,
    # and ignoring it could cause issues when the branch is pushed to develop,
    # since there might be hidden uses of the removed library that the
    # developer did not catch. The only way to enforce the removal of a
    # library is to clean out the folder and reinstall everything from
    # scratch.
if any(required is None for required, _ in mismatches.values()):
if os.path.isdir(common.THIRD_PARTY_PYTHON_LIBS_DIR):
shutil.rmtree(common.THIRD_PARTY_PYTHON_LIBS_DIR)
_reinstall_all_dependencies()
return
git_mismatches, pip_mismatches = (
utils.partition(mismatches.items(), predicate=_is_git_url_mismatch))
for normalized_library_name, versions in git_mismatches:
requirements_version, directory_version = versions
# The library listed in 'requirements.txt' is not in the
# 'third_party/python_libs' directory.
if not directory_version or requirements_version != directory_version:
_install_direct_url(normalized_library_name, requirements_version)
for normalized_library_name, versions in pip_mismatches:
requirements_version = (
pkg_resources.parse_version(versions[0]) if versions[0] else None)
directory_version = (
pkg_resources.parse_version(versions[1]) if versions[1] else None)
# The library listed in 'requirements.txt' is not in the
# 'third_party/python_libs' directory.
if not directory_version:
_install_library(
normalized_library_name,
python_utils.convert_to_bytes(requirements_version))
# The currently installed library version is not equal to the required
# 'requirements.txt' version.
elif requirements_version != directory_version:
_install_library(
normalized_library_name,
python_utils.convert_to_bytes(requirements_version))
_remove_metadata(
normalized_library_name,
python_utils.convert_to_bytes(directory_version))
def _is_git_url_mismatch(mismatch_item):
"""Returns whether the given mismatch item is for a GitHub URL."""
_, (required, _) = mismatch_item
return required.startswith('git')
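# Example of the mismatch items this predicate receives (illustrative):
#   ('my-lib', ('git+https://github.com/example/my-lib#egg=my-lib', None)) -> True
#   ('flask', ('1.0.1', '1.1.1')) -> False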
def _install_direct_url(library_name, direct_url):
"""Installs a direct URL to GitHub into the third_party/python_libs folder.
Args:
library_name: str. Name of the library to install.
direct_url: str. Full definition of the URL to install. Must match
GIT_DIRECT_URL_REQUIREMENT_PATTERN.
"""
pip_install(
'%s#egg=%s' % (direct_url, library_name),
common.THIRD_PARTY_PYTHON_LIBS_DIR,
upgrade=True,
no_dependencies=True)
def _get_pip_versioned_package_string(library_name, version_string):
"""Returns the standard 'library==version' string for the given values.
Args:
library_name: str. The normalized name of the library.
version_string: str. The version of the package as a string.
Returns:
str. The standard versioned library package name.
"""
return '%s==%s' % (library_name, version_string)
def _install_library(library_name, version_string):
"""Installs a library with a certain version to the
'third_party/python_libs' folder.
Args:
library_name: str. Name of the library to install.
version_string: str. Stringified version of the library to install.
"""
pip_install(
_get_pip_versioned_package_string(library_name, version_string),
common.THIRD_PARTY_PYTHON_LIBS_DIR,
upgrade=True,
no_dependencies=True
)
def _reinstall_all_dependencies():
"""Reinstalls all of the libraries detailed in the compiled
'requirements.txt' file to the 'third_party/python_libs' folder.
"""
_pip_install_requirements(
common.THIRD_PARTY_PYTHON_LIBS_DIR,
common.COMPILED_REQUIREMENTS_FILE_PATH
)
def _get_possible_normalized_metadata_directory_names(
library_name, version_string):
"""Returns possible normalized metadata directory names for python libraries
installed using pip (following the guidelines of PEP-427 and PEP-376).
This ensures that our _remove_metadata() function works as intended. More
details about the guidelines concerning the metadata folders can be found
here:
https://www.python.org/dev/peps/pep-0427/#file-contents
https://www.python.org/dev/peps/pep-0376/#how-distributions-are-installed.
Args:
library_name: str. Name of the library.
version_string: str. Stringified version of the library.
Returns:
set(str). Set containing the possible normalized directory name strings
of metadata folders.
"""
# Some metadata folders replace the hyphens in the library name with
# underscores.
return {
normalize_directory_name(
'%s-%s.dist-info' % (library_name, version_string)),
normalize_directory_name(
'%s-%s.dist-info' % (
library_name.replace('-', '_'), version_string)),
normalize_directory_name(
'%s-%s.egg-info' % (library_name, version_string)),
normalize_directory_name(
'%s-%s.egg-info' % (
library_name.replace('-', '_'), version_string)),
}
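# For example (illustrative; the exact strings depend on normalize_directory_name,
# defined elsewhere in this module), calling
#   _get_possible_normalized_metadata_directory_names('my-lib', '1.2.0')
# yields the normalized forms of 'my-lib-1.2.0.dist-info', 'my_lib-1.2.0.dist-info',
# 'my-lib-1.2.0.egg-info' and 'my_lib-1.2.0.egg-info'.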
def _verify_pip_is_installed():
"""Verify that pip is installed.
Raises:
ImportError. Error importing pip.
"""
try:
python_utils.PRINT('Checking if pip is installed on the local machine')
        # Importing pip just to check if it's installed.
import pip #pylint: disable=unused-variable
except ImportError as e:
common.print_each_string_after_two_new_lines([
'Pip is required to install Oppia dependencies, but pip wasn\'t '
'found on your local machine.',
'Please see \'Installing Oppia\' on the Oppia developers\' wiki '
'page:'])
if common.is_mac_os():
python_utils.PRINT(
'https://github.com/oppia/oppia/wiki/Installing-Oppia-%28Mac-'
'OS%29')
elif common.is_linux_os():
python_utils.PRINT(
'https://github.com/oppia/oppia/wiki/Installing-Oppia-%28Linux'
'%29')
else:
python_utils.PRINT(
'https://github.com/oppia/oppia/wiki/Installing-Oppia-%28'
'Windows%29')
raise ImportError('Error importing pip: %s' % e)
def _run_pip_command(cmd_parts):
"""Run pip command with some flags and configs. If it fails try to rerun it
with additional flags and else raise an exception.
Args:
cmd_parts: list(str). List of cmd parts to be run with pip.
Raises:
Exception. Error installing package.
"""
_verify_pip_is_installed()
# The call to python -m is used to ensure that Python and Pip versions are
# compatible.
command = [sys.executable, '-m', 'pip'] + cmd_parts
process = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode == 0:
python_utils.PRINT(stdout)
elif 'can\'t combine user with prefix' in stderr:
python_utils.PRINT('Trying by setting --user and --prefix flags.')
subprocess.check_call(
command + ['--user', '--prefix=', '--system'])
else:
python_utils.PRINT(stderr)
python_utils.PRINT(
'Refer to https://github.com/oppia/oppia/wiki/Troubleshooting')
raise Exception('Error installing package')
def pip_install_to_system(package, version):
"""Installs third party libraries with pip to the user's system.
Note: These libraries are installed to the user's default system-wide
'site-packages' folder, not to a local Oppia third-party directory. This is
ONLY required in very specific cases where the development server scripts
    require default libraries. (When another python script is run through the
    shell, we cannot modify the call stack it is started with; we therefore
    have no control over which system paths that script searches for libraries,
    and can only install the necessary libraries to the default system path.)
In general, please DO NOT use this method when installing packages required
for oppia. Use pip_install instead.
Args:
package: str. The package name.
version: str. The package version.
"""
_run_pip_command(
['install', _get_pip_versioned_package_string(package, version)])
def pip_install(
versioned_package, install_path, upgrade=False, no_dependencies=False):
"""Installs third party libraries with pip to a specific path.
Args:
versioned_package: str. A 'lib==version' formatted string.
install_path: str. The installation path for the package.
upgrade: bool. Whether to call pip with the --upgrade flag.
        no_dependencies: bool. Whether to call pip with the --no-deps flag.
"""
additional_pip_args = []
if upgrade:
additional_pip_args.append('--upgrade')
if no_dependencies:
        additional_pip_args.append('--no-deps')  # pip's flag for skipping dependency resolution
_run_pip_command([
'install', versioned_package, '--target', install_path
] + additional_pip_args)
def _pip_install_requirements(install_path, requirements_path):
"""Installs third party libraries from requirements files with pip.
Args:
install_path: str. The installation path for the packages.
requirements_path: str. The path to the requirements file.
"""
_run_pip_command([
        'install', '--target', install_path, '--no-deps',
'-r', requirements_path, '--upgrade'
])
def get_mismatches():
"""Returns a dictionary containing mismatches between the 'requirements.txt'
file and the 'third_party/python_libs' directory. Mismatches are defined as
the following inconsistencies:
1. A library exists in the requirements file but is not installed in the
'third_party/python_libs' directory.
2. A library is installed in the 'third_party/python_libs'
directory but it is not listed in the requirements file.
3. The library version installed is not as recent as the library version
listed in the requirements file.
4. The library version installed is more recent than the library version
listed in the requirements file.
Returns:
dict(str, tuple(str|None, str|None)). Dictionary with the
library names as keys and tuples as values. The 1st element of the
tuple is the version string of the library required by the
requirements.txt file while the 2nd element is the version string of
the library currently in the 'third_party/python_libs' directory. If
the library doesn't exist, the corresponding tuple element will be None.
For example, the following dictionary signifies that 'requirements.txt'
requires flask with version 1.0.1 while the 'third_party/python_libs'
directory contains flask 1.1.1 (or mismatch 4 above):
{
flask: ('1.0.1', '1.1.1')
}
"""
requirements_contents = _get_requirements_file_contents()
directory_contents = _get_third_party_python_libs_directory_contents()
mismatches = {}
for normalized_library_name in requirements_contents:
# Library exists in the directory and the requirements file.
if normalized_library_name in directory_contents:
# Library matches but version doesn't match.
if (directory_contents[normalized_library_name] !=
requirements_contents[normalized_library_name]):
mismatches[normalized_library_name] = (
requirements_contents[normalized_library_name],
directory_contents[normalized_library_name])
# Library exists in the requirements file but not in the directory.
else:
mismatches[normalized_library_name] = (
requirements_contents[normalized_library_name], None)
for normalized_library_name in directory_contents:
# Library exists in the directory but is not in the requirements file.
if normalized_library_name not in requirements_contents:
mismatches[normalized_library_name] = (
None, directory_contents[normalized_library_name])
return mismatches
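# Illustrative sketch (hypothetical helper, not used elsewhere): maps one value
# from the dictionary returned by get_mismatches() onto the cases listed in its
# docstring.
def _describe_mismatch(requirements_version, directory_version):
    if directory_version is None:
        return 'missing from third_party/python_libs'
    if requirements_version is None:
        return 'installed but not listed in requirements.txt'
    return 'version mismatch: requirements.txt wants %s, directory has %s' % (
        requirements_version, directory_version)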
def validate_metadata_directories():
"""Validates that each library installed in the 'third_party/python_libs'
has a corresponding metadata directory | |
<filename>OltreBot/cogs/music/music.py
import re
import discord
import lavalink
from typing import Union, Dict
from discord.ext import commands
from discord.ext.commands import Cog, Context
from OltreBot.util import get_logger
from OltreBot.util.colors import *
from .lavalink_voice_client import LavalinkVoiceClient
from .embed import MusicEmbed
import time
import ytm
YTM = ytm.YouTubeMusic()
LOGGER = get_logger('Music', sub_folder='cog')
url_rx = re.compile(r'https?://(?:www\.)?.+')
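# Illustrative sketch (hypothetical helper, not used by the cog): the query
# rewrite that Music.play applies before asking Lavalink for tracks -- bare
# search terms get a "ytsearch:" prefix while URLs are passed through.
def _as_lavalink_query(query: str) -> str:
    query = query.strip('<>')
    return query if url_rx.match(query) else f'ytsearch:{query}'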
class Music(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.log = LOGGER
self.embed_id: Union[Dict, None] = None
# On Ready
@Cog.listener()
async def on_ready(self):
if not hasattr(self.bot, 'lavalink'): # This ensures the client isn't overwritten during cog reloads.
self.bot.lavalink = lavalink.Client(self.bot.user.id)
self.bot.lavalink.add_node('127.0.0.1', 2333, 'youshallnotpass', 'eu',
'default-node') # Host, Port, Password, Region, Name
lavalink.add_event_hook(self.track_hook)
def cog_unload(self):
""" Cog unload handler. This removes any event hooks that were registered. """
self.bot.lavalink._event_hooks.clear()
async def send_msg(self, ctx: Context, msg: str):
self.log.debug(f'For user: {ctx.author} -> {msg}')
await ctx.send(msg)
async def cog_before_invoke(self, ctx):
""" Command before-invoke handler. """
guild_check = ctx.guild is not None
# This is essentially the same as `@commands.guild_only()`
# except it saves us repeating ourselves (and also a few lines).
if guild_check:
await self.ensure_voice(ctx)
# Ensure that the bot and command author share a mutual voice channel.
return guild_check
async def cog_command_error(self, ctx, error):
if isinstance(error, commands.CommandInvokeError):
self.log.error(f"Error <{error}> asked by {yellow(ctx.author.name)}")
await self.send_msg(ctx, error.original)
# The above handles errors thrown in this cog and shows them to the user.
# This shouldn't be a problem as the only errors thrown in this cog are from `ensure_voice`
# which contain a reason string, such as "Join a voice channel" etc. You can modify the above
# if you want to do things differently.
def get_player(self, guild_id: str) -> lavalink.DefaultPlayer:
return self.bot.lavalink.player_manager.get(guild_id)
def log_user_call_command(self, ctx: Context, cmd_name: str, *args):
self.log.info(f"Command: <{magenta(cmd_name)}| {cyan(' '.join(args))}> asked by {yellow(ctx.author.name)}")
async def ensure_voice(self, ctx):
""" This check ensures that the bot and command author are in the same voicechannel. """
player = self.bot.lavalink.player_manager.create(ctx.guild.id, endpoint=str(ctx.guild.region))
# Create returns a player if one exists, otherwise creates.
# This line is important because it ensures that a player always exists for a guild.
# Most people might consider this a waste of resources for guilds that aren't playing, but this is
# the easiest and simplest way of ensuring players are created.
# These are commands that require the bot to join a voice channel (i.e. initiating playback).
# Commands such as volume/skip etc. don't require the bot to be in a voice channel so don't need listing here.
should_connect = ctx.command.name in ('play', 'radio')
if not ctx.author.voice or not ctx.author.voice.channel:
# Our cog_command_error handler catches this and sends it to the voice channel.
# Exceptions allow us to "short-circuit" command invocation via checks so the
# execution state of the command goes no further.
raise commands.CommandInvokeError('Join a voice channel first.')
if not player.is_connected:
if not should_connect:
raise commands.CommandInvokeError('Not connected.')
permissions = ctx.author.voice.channel.permissions_for(ctx.me)
if not permissions.connect or not permissions.speak: # Check user limit too?
raise commands.CommandInvokeError('I need the `CONNECT` and `SPEAK` permissions.')
player.store('channel', ctx.channel.id)
await ctx.author.voice.channel.connect(cls=LavalinkVoiceClient)
else:
if int(player.channel_id) != ctx.author.voice.channel.id:
raise commands.CommandInvokeError('You need to be in my voicechannel.')
async def track_hook(self, event):
if isinstance(event, lavalink.events.QueueEndEvent):
# When this track_hook receives a "QueueEndEvent" from lavalink.py
# it indicates that there are no tracks left in the player's queue.
# To save on resources, we can tell the bot to disconnect from the voicechannel.
self.log.debug(f"Lavalink.Event QueueEndEvent")
guild_id = int(event.player.guild_id)
guild = self.bot.get_guild(guild_id)
for text_channel in guild.text_channels:
                if self.embed_id and text_channel.id in self.embed_id:
del self.embed_id[text_channel.id]
await guild.voice_client.disconnect(force=True)
if isinstance(event, lavalink.events.TrackEndEvent):
self.log.debug(f"Lavalink.Event TrackEndEvent")
if isinstance(event, lavalink.events.TrackStartEvent):
# When a new track start
self.log.debug(f"Lavalink.Event TrackStartEvent")
player: lavalink.DefaultPlayer = event.player
guild_id = int(event.player.guild_id)
guild = self.bot.get_guild(guild_id)
if player.current is not None:
embed = self.get_track_embed(self.bot.user, player.current)
for text_channel in guild.text_channels:
await self.send_music_embed(embed, text_channel=text_channel)
@commands.command(aliases=['p'])
async def play(self, ctx, *, query: str):
""" Searches and plays a song from a given query. """
# Get the player for this guild from cache.
self.log_user_call_command(ctx, 'play', query)
player = self.get_player(ctx.guild.id)
# Remove leading and trailing <>. <> may be used to suppress embedding links in Discord.
query = query.strip('<>')
        # Check if the user input might be a URL. If it isn't, we can let Lavalink do a YouTube search for it instead.
# SoundCloud searching is possible by prefixing "scsearch:" instead.
if not url_rx.match(query):
query = f'ytsearch:{query}'
# Get the results for the query from Lavalink.
start_time = time.time_ns()
results = await player.node.get_tracks(query)
exec_stamp = (time.time_ns() - start_time) / int(1e6)
# Results could be None if Lavalink returns an invalid response (non-JSON/non-200 (OK)).
        # Alternatively, results['tracks'] could be an empty array if the query yielded no tracks.
if not results or not results['tracks']:
return await self.send_msg(ctx, 'Nothing found!')
# Valid loadTypes are:
# TRACK_LOADED - single video/direct URL)
# PLAYLIST_LOADED - direct URL to playlist)
# SEARCH_RESULT - query prefixed with either ytsearch: or scsearch:.
# NO_MATCHES - query yielded no results
# LOAD_FAILED - most likely, the video encountered an exception during loading.
if results['loadType'] == 'SEARCH_RESULT':
track = results['tracks'][0]
# You can attach additional information to audio tracks through kwargs, however this involves
# constructing the AudioTrack class yourself.
track = lavalink.models.AudioTrack(track, ctx.author, recommended=True)
embed = MusicEmbed.search(self, ctx.author, track, exec_stamp)
player.add(requester=ctx.author.id, track=track)
        elif results['loadType'] == 'PLAYLIST_LOADED':
tracks = results['tracks']
for idx, track in enumerate(tracks):
# Add all the tracks from the playlist to the queue.
player.add(requester=ctx.author, track=track)
embed = MusicEmbed.playlist(self, ctx.author, results["playlistInfo"]["name"], tracks, exec_stamp)
elif results['loadType'] == 'TRACK_LOADED':
track = results['tracks'][0]
track = lavalink.models.AudioTrack(track, ctx.author, recommended=True)
embed = self.get_track_embed(ctx.author, track)
player.add(requester=ctx.author, track=track)
elif results['loadType'] == 'NO_MATCHES':
embed = MusicEmbed.failed(self, ctx.author, "No Match found", exec_stamp)
elif results['loadType'] == 'LOAD_FAILED':
embed = MusicEmbed.failed(self, ctx.author, "Load failed", exec_stamp)
await ctx.send(embed=embed)
# We don't want to call .play() if the player is playing as that will effectively skip
# the current track.
if not player.is_playing:
await player.play()
@commands.command(aliases=['r'])
async def radio(self, ctx: Context, *, query: str):
# Logs
self.log_user_call_command(ctx, 'radio', query)
start_time = time.time_ns()
# Retrieve final link
try:
songs = YTM.search_songs(query)
song_id = songs['items'][0]['id']
radio_id = songs['items'][0]['radio']['playlist_id']
final_url = f"https://music.youtube.com/watch?v={song_id}&list={radio_id}"
# Get Player
await self.play(ctx, query=final_url)
except Exception as e:
            exec_stamp = (time.time_ns() - start_time) / int(1e6)
embed = MusicEmbed.failed(self, ctx.author, "Failed Radio", exec_stamp)
await ctx.send(embed=embed)
@commands.command()
async def current(self, ctx: Context):
""" Get current Track info. """
player = self.get_player(ctx.guild.id)
if not player.is_connected:
# We can't disconnect, if we're not connected.
return await self.send_msg(ctx, 'Not connected.')
if not ctx.author.voice or (player.is_connected and ctx.author.voice.channel.id != int(player.channel_id)):
# Abuse prevention. Users not in voice channels, or not in the same voice channel as the bot
# may not disconnect the bot.
return await self.send_msg(ctx, "You're not in my voice-channel!")
if player.current:
track = player.current
embed = self.get_track_embed(ctx.author, track)
else:
embed = self.get_track_embed(ctx.author)
await self.send_music_embed(embed, ctx=ctx)
@commands.command(alias=['q'])
async def queue(self, ctx: Context):
""" Get current Track info. """
player = self.get_player(ctx.guild.id)
if not player.is_connected:
# We can't disconnect, if we're not connected.
return await self.send_msg(ctx, 'Not connected.')
if not ctx.author.voice or (player.is_connected and ctx.author.voice.channel.id != int(player.channel_id)):
# Abuse prevention. Users not in voice channels, or not in the same voice channel as the bot
# may not disconnect the bot.
return await self.send_msg(ctx, "You're not in my voice channel!")
embed = MusicEmbed.playlist(self, ctx.author, "Current Queue", player.queue, 0.0)
await ctx.send(embed=embed)
@commands.command()
async def clear(self, ctx: Context):
""" Clear queue Tracks """
player = self.get_player(ctx.guild.id)
if not player.is_connected:
# We can't disconnect, if we're not connected.
return await self.send_msg(ctx, 'Not connected.')
if not ctx.author.voice or (player.is_connected and ctx.author.voice.channel.id != int(player.channel_id)):
# Abuse prevention. Users not in voice channels, or not in the same voice channel as the bot
# may not disconnect the bot.
return await self.send_msg(ctx, "You're not in my voicechannel!")
embed = discord.Embed()
tracks = len(player.queue)
embed.title = 'Queue cleared'
embed.description = f'removed {tracks} tracks.'
player.queue.clear()
await ctx.send(embed=embed)
def get_track_embed(self, author: discord.client, track: lavalink.AudioTrack = None) -> discord.Embed:
if isinstance(track, lavalink.AudioTrack):
return MusicEmbed.track(self, author, track)
else:
return MusicEmbed.empty(self, author)
async def send_music_embed(self, embed: discord.Embed, ctx: Context = None,
text_channel: discord.TextChannel | |
| up]): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation didn't successfully complete (yet). Up - session came up successfully.
"""
return self._get_attribute(self._SDM_ATT_MAP['SessionStatus'])
@property
def StackedLayers(self):
"""
Returns
-------
- list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*]): List of secondary (many to one) child layer protocols
"""
return self._get_attribute(self._SDM_ATT_MAP['StackedLayers'])
@StackedLayers.setter
def StackedLayers(self, value):
self._set_attribute(self._SDM_ATT_MAP['StackedLayers'], value)
@property
def StateCounts(self):
"""
Returns
-------
- dict(total:number,notStarted:number,down:number,up:number): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
"""
return self._get_attribute(self._SDM_ATT_MAP['StateCounts'])
@property
def Status(self):
"""
Returns
-------
- str(configured | error | mixed | notStarted | started | starting | stopping): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
"""
return self._get_attribute(self._SDM_ATT_MAP['Status'])
@property
def TypeAreaId(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Area ID Type
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TypeAreaId']))
@property
def V6(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Option bit 0
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['V6']))
@property
def VFlag(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): V-Flag: Value flag. If set, then the SID carries an absolute value label value
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VFlag']))
@property
def Weight(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Weight of the SID for the purpose of load balancing
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Weight']))
def update(self, ConnectedVia=None, Multiplier=None, Name=None, StackedLayers=None):
"""Updates ospfv3 resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, ConnectedVia=None, Multiplier=None, Name=None, StackedLayers=None):
"""Adds a new ospfv3 resource on the server and adds it to the container.
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
Returns
-------
- self: This instance with all currently retrieved ospfv3 resources using find and the newly added ospfv3 resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained ospfv3 resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, ConnectedVia=None, Count=None, DescriptiveName=None, Errors=None, LocalRouterID=None, Multiplier=None, Name=None, Ospfv3IfaceState=None, Ospfv3NeighborState=None, SessionInfo=None, SessionStatus=None, StackedLayers=None, StateCounts=None, Status=None):
"""Finds and retrieves ospfv3 resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve ospfv3 resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all ospfv3 resources from the server.
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- Errors (list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork//.../*],arg2:list[str]))): A list of errors that have occurred
- LocalRouterID (list(str)): Router ID
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- Ospfv3IfaceState (list(str[backup | down | dr | drOther | pointToPoint | unrecognized | waiting])): Logs additional information about the Interface State
- Ospfv3NeighborState (list(str[attempt | down | exchange | exStart | full | init | loading | multiNeighbor | none | twoWay])): Logs additional information about the Neighbor State
- SessionInfo (list(str[ifaceSessInfoAllNbrIn2Way | ifaceSessInfoAllNbrInattempt | ifaceSessInfoAllNbrInDown | ifaceSessInfoAllNbrInExchange | ifaceSessInfoAllNbrInExStart | ifaceSessInfoAllNbrInInit | ifaceSessInfoAllNbrInLoading | ifaceSessInfoFsmNotStarted | ifaceSessInfoSameNbrId | iPAddressNotRcvd | none])): Logs additional information about the session state
        - SessionStatus (list(str[down | notStarted | up])): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation didn't successfully complete (yet). Up - session came up successfully.
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
- StateCounts (dict(total:number,notStarted:number,down:number,up:number)): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
- Status (str(configured | error | mixed | notStarted | started | starting | stopping)): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
Returns
-------
- self: This instance with matching ospfv3 resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
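        # Illustrative usage sketch (handle names are hypothetical): retrieve only
        # the ospfv3 sessions whose Name matches exactly, per the regex rules above:
        #   ospfv3 = ipv6_instance.Ospfv3.find(Name='^ospfv3-east$')
        #   print(ospfv3.SessionStatus)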
def read(self, href):
"""Retrieves a single instance of ospfv3 data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the ospfv3 resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def get_device_ids(self, PortNames=None, Active=None, AdjSID=None, AreaId=None, AreaIdIp=None, AuthAlgo=None, BFlag=None, DeadInterval=None, DemandCircuit=None, EnableAdjSID=None, EnableAuthentication=None, EnableBfdRegistration=None, EnableFastHello=None, EnableIgnoreDbDescMtu=None, ExternalCapability=None, GFlag=None, HelloInterval=None, HelloMultiplier=None, InstanceId=None, Key=None, LFlag=None, LinkMetric=None, NetworkType=None, NssaCapability=None, PFlag=None, Priority=None, Router=None, SaId=None, TypeAreaId=None, V6=None, VFlag=None, Weight=None):
"""Base class infrastructure that gets a list of ospfv3 device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- Active (str): optional regex of active
- AdjSID (str): optional regex of adjSID
- AreaId (str): optional regex of areaId
- AreaIdIp (str): optional regex of areaIdIp
- AuthAlgo (str): optional regex of authAlgo
- BFlag (str): optional regex of bFlag
- DeadInterval (str): optional regex of deadInterval
- DemandCircuit (str): optional regex of demandCircuit
- EnableAdjSID (str): optional regex of enableAdjSID
- EnableAuthentication (str): optional regex of enableAuthentication
- EnableBfdRegistration (str): optional regex of enableBfdRegistration
- EnableFastHello (str): optional regex of enableFastHello
- EnableIgnoreDbDescMtu (str): optional regex of enableIgnoreDbDescMtu
- ExternalCapability (str): optional regex of externalCapability
- GFlag (str): optional regex of gFlag
- HelloInterval (str): optional regex of helloInterval
- HelloMultiplier (str): optional regex of helloMultiplier
- InstanceId (str): optional regex of instanceId
- Key (str): optional regex of key
- LFlag (str): optional regex of lFlag
- LinkMetric (str): optional regex of linkMetric
- NetworkType (str): optional regex of networkType
- NssaCapability (str): optional regex of nssaCapability
- PFlag (str): optional regex of pFlag
- Priority (str): optional regex of priority
- Router (str): optional regex of router
- SaId (str): optional regex of | |
<gh_stars>0
#!/usr/bin/env python
# KMB 11/06/2008
import distutils
from distutils import util as distutils_util
import os # use os for manipulating path names
import platform
import sys # use sys for parsing command line
import time # use time for setting playback rate
import warnings
import wx
from wx import xrc
import numpy as num
num.seterr( invalid='raise' ) # raise errors on numerical exceptions
num.seterr( divide='raise' )
num.seterr( over='warn' )
num.seterr( under='ignore' )
#warnings.filterwarnings("error", category=num.VisibleDeprecationWarning)
from scipy.misc import imresize
from matplotlib import __version__ as mpl_version
try:
from psutil import __version__ as psutil_version
except ImportError:
psutil_version = "-not found-"
from cv2 import __version__ as opencv_version
from version import __version__, DEBUG, DEBUG_REPEATABLE_BEHAVIOR
import annfiles as annot
import batch
import bg
import algorithm
import draw
import imagesk
import movies
import ellipsesk as ell
from params import params, const
if DEBUG:
import pdb
class CtraxApp (algorithm.CtraxAlgorithm): # eventually inherits from wx.App
def has( self, attr ):
return hasattr( self, attr ) and getattr( self, attr ) is not None
def OnInit( self ):
"""
Start up the Ctrax GUI
"""
self.InitState() # in settings.py
self.ParseCommandLine()
self.InitGUI() # in settings.py
# read saved settings, overwriting defaults
self.ReadUserfile() # in settings.py
# draw GUI
self.frame.Show()
self.alive = True
params.app_instance = self
if params.interactive:
print "********** Ctrax Warning and Error Messages **********"
print "Error and warning messages will appear in this window."
print "If you have trouble and are contacting the Ctrax mailing"
print "list (see http://groups.google.com/group/ctrax ), be"
print "sure to copy and paste the relevant messages from this"
print "window into your email."
print "*********************** Ctrax ************************\n"
print "system version", platform.platform()
print "Python version", sys.version
print "Wx version", wx.__version__
print "OpenCV version", opencv_version
print "Matplotlib version", mpl_version
print "psutil version", psutil_version
print "Ctrax version", __version__
# open movie, ann file
self.OpenMovie( new_movie=False )
return True
def PrintUsage(self):
"""
Print command line arguments for Ctrax.
"""
self.RestoreStdio()
print """Ctrax version {0}:
Optional Command Line Arguments:
--Interactive={True,False}
--Output=<movie.fmf.ann>
--Input=<movie.fmf>
--SettingsFile=<settings.ann>
--AutoEstimateBackground={True,False}
--AutoEstimateShape={True,False}
--AutoDetectCircularArena={True,False}
--CompressMovie=<movie.sbfmf>
--MatFile=<movie.mat>
--CsvFile=<movie.csv>
--DiagnosticsFile=<movie_ctraxdiagnostics.txt>
--FirstFrameTrack={0,1,...}
--LastFrameTrack={-1,0,1,...}
--ResumeTracking={False,True}
--FlipMovieUD={False,True}
--EnforceShapeBounds={True,False}
Example:
Ctrax --Interactive=True --Input=movie1.fmf \\
--Output=movie1.fmf.ann \\
--SettingsFile=exp1.ann \\
--MatFile=movie1.mat \\
--DiagnosticsFile=movie1_ctraxdiagnostics.txt
By default, Interactive=True, AutoEstimateBackground=True,
AutoEstimateShape=True, AutoDetectCircularArena=True,
FirstFrameTrack=0, LastFrameTrack=-1
(meaning to track until the end of the video),
ResumeTracking=False, FlipMovieUD=False
If not in interactive mode, then Input must be defined.
If Input is movie1.fmf and Output is not defined, then output
and settings will go to movie1.fmf.ann in non-interactive mode.
Existing annotation files are always backed up before being
overwritten, as long as the user has the appropriate
permissions to the output directory.
If CompressMovie is not set, then a compressed SBFMF will not
be created.
If MatFile is not set, then <basename>.mat will be used,
where <basename> is the base name of the movie.
If CsvFile is not set, no CSV output will be exported.
If DiagnosticsFile is not set, then
<basename>_ctraxdiagnostics.txt will be used.
""".format(__version__)
def ParseCommandLine(self):
"""
Interpret command line arguments.
"""
args = sys.argv[1:]
self.movie = None
self.ann_filename = None
self.start_frame = 0
# KB 20120109: added support for last_frame command-line argument
self.last_frame = num.inf
self.dowritesbfmf = False
self.input_file_was_specified = False
self.output_file_was_specified = False
if len( args ) == 1:
if args[0] == '--help':
self.PrintUsage()
sys.exit(1)
elif args[0] == '--version':
print "Ctrax", __version__
sys.exit(1)
elif args[0].startswith( '-psn_' ):
# in Mac, launching from Finder sends a process serial number
args = []
elif not '=' in args[0]:
# in Windows, it could be a bare movie name
args = ['--input=%s'%args[0]]
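# In short: a single positional argument is treated as '--help', '--version',
# a Mac Finder '-psn_*' token to be discarded, or (when it contains no '=')
# a bare movie path that is rewritten into '--input=<path>' before normal parsing.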
# parse first for arguments that could alter other arguments' effects
for arg in args:
if arg.lower() == '--interactive=false':
params.interactive = False
params.enable_feedback( False )
# if we were redirecting to an output window,
# restore stdio to the command line prompt
if not os.getenv('CTRAX_REDIRECT_FILENAME'):
self.RestoreStdio()
for i in range(len(args)):
# the arguments should be of the form --<paramname>=<paramvalue>
try:
name,value = args[i].split('=',1)
except:
print 'Error parsing command line arguments. No equals sign found. Usage: '
self.PrintUsage()
raise NotImplementedError
if name.lower() == '--interactive':
continue # already handled
elif name.lower() == '--input':
if hasattr( self, 'frame' ):
openframe = self.frame
else:
openframe = None
self.movie = movies.Movie( value, interactive=params.interactive,
parentframe=openframe, open_now=True )
self.input_file_was_specified = True
elif name.lower() == '--output':
self.ann_filename = value
self.output_file_was_specified = True
elif name.lower() == '--settingsfile':
self.settingsfilename = value
print "settingsfile = " + str(self.settingsfilename)
elif name.lower() == '--autoestimatebackground':
if value.lower() == 'false':
params.batch_autodetect_bg_model = False
elif name.lower() == '--autoestimateshape':
if value.lower() == 'false':
params.batch_autodetect_shape = False
elif name.lower() == '--autodetectcirculararena':
if value.lower() == 'false':
params.batch_autodetect_arena = False
elif value.lower() == 'none':
params.batch_autodetect_arena = None
elif name.lower() == '--compressmovie':
self.writesbfmf_filename = value
self.dowritesbfmf = True
elif name.lower() == '--matfile':
self.matfilename = value
elif name.lower() == '--csvfile':
self.csvfilename = value
elif name.lower() == '--diagnosticsfile':
self.diagnostics_filename = value
elif name.lower() == '--firstframetrack':
# KB 20120109: added support for start_frame command-line argument
try:
start_frame = float(value)
if start_frame < 0 or round(start_frame) != start_frame:
raise ValueError
except:
print "FirstFrameTrack must be an integer greater than or equal to 0"
self.PrintUsage()
raise
self.start_frame = int(start_frame)
print "setting params.start_frame = " + str(int(start_frame))
params.start_frame = int(start_frame)
elif name.lower() == '--lastframetrack':
try:
last_frame = float(value)
if round(last_frame) != last_frame:
raise NotImplementedError
if last_frame < 0:
last_frame = num.inf
except:
print "LastFrameTrack must be an integer"
self.PrintUsage()
raise
self.last_frame = last_frame
elif name.lower() == '--resumetracking':
if value.lower() == 'true':
params.noninteractive_resume_tracking = True
elif name.lower() == '--flipmovieud':
if value.lower() == 'true':
params.movie_flipud = True
elif name.lower() == '--enforceshapebounds':
if value.lower() == 'false':
params.enforce_minmax_shape = False
else:
print 'Error parsing command line arguments. Unknown parameter name "%s". Usage: '%name
self.PrintUsage()
raise NotImplementedError
if self.start_frame > self.last_frame:
print "FirstFrameTrack must be <= LastFrameTrack"
self.PrintUsage()
raise NotImplementedError
if params.noninteractive_resume_tracking:
# to resume tracking, output file must exist
# (and should have ann data in it)
if (not self.output_file_was_specified) or \
self.ann_filename is None or \
(not os.path.isfile( self.ann_filename )):
print "To resume tracking, an existing output file must be specified."
raise NotImplementedError
# run noninteractive mode
if params.interactive == False:
self.run_noninteractive()
sys.exit( 0 )
def run_noninteractive(self):
"""
Run Ctrax in non-interactive mode.
"""
starttime = time.time()
self.frame = None
# input movie name must be specified on the command line
if not self.has( 'movie' ):
print 'Error parsing command line arguments.\n\
Input file must be specified in non-interactive mode.\n\
Usage: '
self.PrintUsage()
raise NotImplementedError
if not self.has( 'ann_filename' ):
self.ann_filename = self.get_filename_with_extension( '+.ann' )
if self.has( 'ann_filename' ):
print "ann_filename = " + str(self.ann_filename)
if not self.has( 'settingsfilename' ):
self.settingsfilename = self.ann_filename
# open the movie
print "Opening movie " + self.movie.filename
self.OpenMovie( new_movie=False )
if not self.has( 'movie' ):
print "failed opening movie!"
sys.exit( 1 )
# start_frame can be reset by annfile if output file exists?
params.start_frame = self.start_frame
# KB 20120109: do_refresh not set otherwise
self.do_refresh = True
# actually, it's previously set to False in InitState() -JAB 1/10/12
# does it need to be True? what happens if it's not set here?
if not self.has('diagnostics_filename'):
self.diagnostics_filename = self.get_filename_with_extension( '_ctraxdiagnostics.txt' )
print "Diagnostics info will be written to " + self.diagnostics_filename
# do the tracking steps
print "DoAll..."
self.DoAll()
print "process completed in", time.time()-starttime, "s"
def LoadSettings( self ):
"""
Load parameter values from another annotation file.
"""
doreadbgmodel = not (params.interactive or self.IsBGModel())
if doreadbgmodel and self.has( 'movie' ):
bg_img_shape = (self.movie.get_height(), self.movie.get_width())
else:
bg_img_shape = None
print "loading settings from file " + self.settingsfilename
print "reading bg?", doreadbgmodel, "-",
if not doreadbgmodel:
if params.interactive:
print "in interactive mode"
elif self.IsBGModel():
print "bg model exists"
else:
print "??"
try:
annot.LoadSettings(self.settingsfilename,
self.bg_imgs,
bg_img_shape=bg_img_shape,
readbgmodel=doreadbgmodel)
except:
print 'Could not read annotation file ' + self.settingsfilename
raise
def MacOpenFile( self, filename ):
"""Fires when a compatible file is dropped onto the app on Mac."""
self.movie = movies.Movie( filename, interactive=params.interactive,
parentframe=self.frame, open_now=True )
self.OpenMovie( new_movie=False )
def OpenMovie( self, new_movie=True ):
"""Attempt to open a movie given the current filename."""
if new_movie:
try:
# open movie file
self.movie = movies.Movie( self.dir, params.interactive,
default_extension=self.last_movie_ext )
if DEBUG:
print "Opened movie " + str(self.movie.filename)
except ImportError: # open was cancelled
return
except Exception, details:
raise #########################
# error messages should be handled by the movie object
if self.has( 'movie' ):
if params.interactive:
wx.MessageBox( "Could not open the movie " + self.movie.filename,
"Error", wx.ICON_ERROR|wx.OK )
else:
print "Could not open the movie " + self.movie.filename
print details
self.movie =
import json
import logging
import os
import pathlib
import shutil
import traceback
from datetime import datetime, timedelta, timezone
from statistics import mean
import dateutil.parser
import humanize
import pandas as pd
import pytz
from django.conf import settings
from common.notify import Notify
from des.dao import (DesSkybotJobDao, DesSkybotJobResultDao,
ExposureDao)
from des.skybot.import_positions import DESImportSkybotPositions
from skybot.skybot_server import SkybotServer
from des.models import SummaryDynclass
from des.summary import SummaryResult
class AbortSkybotJobError(Exception):
pass
class DesSkybotPipeline():
def __init__(self):
self.logger = logging.getLogger("skybot")
self.logger_import = logging.getLogger("skybot_load_data")
# Directory where the CSV files downloaded from Skybot are stored
self.base_path = settings.SKYBOT_OUTPUT
# Radius used in the Skybot query, large enough to cover a DES exposure.
# Cone search radius in degrees
self.radius = 1.2
# Date Correction for DECam
self.date_correction = 1.05
# Observer Code for Cerro Tololo-DECam
self.observer_location = 'w84'
# Filter to retrieve only objects with a position error less than the given value
self.position_error = 0
self.spdao = DesSkybotJobDao(pool=False)
self.epdao = ExposureDao(pool=False)
self.dsdao = DesSkybotJobResultDao(pool=False)
self.attempts_on_fail_by_exposure = 1
def get_job_by_id(self, id):
return self.spdao.get_by_id(id)
def update_job(self, job):
return self.spdao.update_by_id(job['id'], job)
def complete_job(self, job):
return self.spdao.complete_job(job['id'], job)
def create_skybot_log(self, job_path):
"""Cria um arquivo de log no diretório execução do Job.
Este log é uma cópia do log definido no settings.
Neste log estão as informações sobre as requisições.
Arguments:
job_path {str} -- Path onde o job está sendo executado. normalmente Model job['path']
"""
# Add a log handler to keep a copy of the log in the process directory.
fh = logging.FileHandler(os.path.join(job_path, 'skybot_requests.log'))
formatter = logging.Formatter(
'%(asctime)s [%(levelname)s] %(message)s')
fh.setFormatter(formatter)
self.logger.addHandler(fh)
def create_loaddata_log(self, job_path):
"""Cria um arquivo de log no diretório execução do Job.
Este log é uma cópia do log definido no settings.
Neste log estão as informações sobre a importaçao dos dados no banco de dados.
Arguments:
job_path {str} -- Path onde o job está sendo executado. normalmente Model job['path']
"""
# Add a log handler to keep a copy of the log in the process directory.
fh = logging.FileHandler(os.path.join(job_path, 'skybot_loaddata.log'))
formatter = logging.Formatter(
'%(asctime)s [%(levelname)s] %(message)s')
fh.setFormatter(formatter)
self.logger_import.addHandler(fh)
def get_base_path(self):
"""Retorna o diretório onde estão todos os jobs do skybot.
este diretório está expecificado na settings.SKYBOT_OUTPUT
Returns:
str -- Path onde ficam os resultados de todos os jobs skybot.
"""
return self.base_path
def get_job_path(self, job_id):
"""Retorna o path para o Job baseado em seu id.
o diretório de um job é composto por base_path/des_skybot_<job_id>
Arguments:
job_id {int} -- Id do Job que está sendo executado
Returns:
str -- Path onde o job está sendo executado.
"""
output_path = os.path.join(
self.get_base_path(), "des_skybot_%s" % str(job_id))
return output_path
def create_job_dir(self, job_id):
"""Cria o Diretório para o job se não existir.
Arguments:
job_id {int} -- Id do Job que está sendo executado
Returns:
job_path {str} -- Path onde o job está sendo executado.
"""
path = self.get_job_path(job_id)
if not os.path.exists(path):
os.mkdir(path)
self.logger.info("A directory has been created for the job.")
return path
def get_positions_path(self, job_id):
"""Retorna o path onde os arquivos de resultado do skybot vão ficar no final da execução.
Durante a execução os arquivos ficam no job_path, mas depois de importados ficam neste diretório.
Arguments:
job_id {int} -- Id do Job que está sendo executado
Returns:
str -- O diretório de outputs é o job_path/outputs.
"""
return os.path.join(self.get_job_path(job_id), 'outputs')
def create_positions_path(self, job_id):
"""Cria um diretório de outputs se não existir.
Arguments:
job_id {int} -- Id do Job que está sendo executado
Returns:
str -- O diretório de outputs é o job_path/outputs.
"""
path = self.get_positions_path(job_id)
if not os.path.exists(path):
os.mkdir(path)
self.logger.info("A directory has been created for the Outputs.")
return path
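# Layout sketch (illustrative values only): with settings.SKYBOT_OUTPUT = '/data/skybot'
# and job_id = 7, get_job_path() resolves to '/data/skybot/des_skybot_7' and
# get_positions_path() to '/data/skybot/des_skybot_7/outputs'.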
def delete_job_dir(self, job_id):
"""Apaga um diretório de Job com todo seu conteudo.
Arguments:
job_id {int} -- Id do Job que sera apagado.
"""
path = self.get_job_path(job_id)
if os.path.exists(path) and os.path.isdir(path):
shutil.rmtree(path)
self.logger.debug("Job directory has been deleted.")
def query_exposures_by_period(self, start, end):
"""Retorna todas as Des/Exposures que tenham date_obs entre o periodo start, end.
23/06/2020 - Foi modificada a query para retornar somente as exposições que ainda não foram executadas.
Arguments:
start {date} -- Data Inicial do periodo
end {date} -- Data Final do periodo
Returns:
Array -- Array com as exposições que atendem ao periodo. cada exposição tem o mesmo conteudo do model des.Exposures
"""
# This query returns all exposures, regardless of whether they have already been executed
# rows = self.epdao.exposures_by_period(start, end)
# This query returns only the exposures that have not yet been executed by Skybot
rows = self.dsdao.not_exec_by_period(start, end)
self.logger.info(
"[%s] Exposures for the period were found." % len(rows))
return rows
def get_exposures_filepath(self, job_path):
"""Retorna o filepath para o arquivo csv que guarda as informações das exposições.
Arguments:
job_path {str} -- Path onde o job está sendo executado. normalmente Model job['path']
Returns:
str -- filepath para exposures.csv, é a junção de job_path/exposures.csv
"""
filepath = os.path.join(job_path, 'exposures.csv')
return filepath
def get_exposures(self, job_id, start, end):
"""Retorna todas as exposições para um job, executa a query uma unica vez e guarda o resultado
no arquivo exposures.csv. se o arquivo exisitir não executa a query novamente.
Arguments:
job_id {int} -- Id do Job que está sendo executado.
start {date} -- Data Inicial do periodo
end {date} -- Data Final do periodo
Returns:
pandas.Dataframe -- Retorna um dataframe com as exposições.
"""
# Check whether a file with the exposures has already been created.
job_path = self.get_job_path(job_id)
filepath = self.get_exposures_filepath(job_path)
if not os.path.exists(filepath):
# If it does not exist, run the query, build a pandas dataframe and save it to file.
# Run the query to find all exposures for this period.
rows = self.query_exposures_by_period(
start.strftime("%Y-%m-%d 00:00:00"),
end.strftime("%Y-%m-%d 23:59:59"))
# Build a pandas dataframe with the exposures.
df = self.create_exposure_dataframe(rows, job_path)
else:
# If it exists, read the file and return the pandas dataframe.
df = self.read_exposure_dataframe(job_path)
return df
def apply_corection_in_date_obs(self, date_obs, exptime):
"""Aplica uma correção a data de observação das exposições
esta correção é :
date_obs = date_obs + (exptime + correction)/2
no caso da DECam o valor de correction é: 1.05
Args:
date_obs (datetime): Data de observação da exposição no formato iso: '2012-11-10 04:09:45.855327+00:00'
exptime (float): Temnpo da exposição, atributo exptime da tabela des/exposure
Returns:
datetime: Data de observação mais o valor da correção.
"""
date = dateutil.parser.parse(str(date_obs))
correction = (float(exptime + float(self.date_correction)))/2
date = date + timedelta(seconds=correction)
return date
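# Worked example (hypothetical values): for an exposure with exptime = 90 s and
# date_correction = 1.05, date_obs is shifted forward by (90 + 1.05) / 2 = 45.525
# seconds, i.e. roughly to the midpoint of the exposure.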
def create_exposure_dataframe(self, rows, job_path):
"""Cria um dataframe para as exposições.
Arguments:
rows {Array} -- Uma lista de exposições
job_path {str} -- Path onde o job está sendo executado. normalmente Model job['path']
Returns:
pandas.Dataframe -- Retorna um dataframe com as exposições.
"""
filepath = self.get_exposures_filepath(job_path)
df = pd.DataFrame(
rows, columns=['id', 'date_obs', 'radeg', 'decdeg', 'exptime'])
# 20-07-2020 Martin requested that a correction be applied to the date_obs field.
# date_obs = date_obs + (exptime + correction)/2
# where correction for DECam is 1.05
df['date_with_correction'] = ""
if df.shape[0] > 0:
df['date_with_correction'] = df.apply(
lambda row: self.apply_corection_in_date_obs(row['date_obs'], row['exptime']), axis=1)
# Add the fields that will be sent to Skybot
df['radius'] = self.radius
df['observer_location'] = self.observer_location
df['position_error'] = self.position_error
# Write the dataframe to file.
df.to_csv(filepath, sep=';', header=True, index=False)
self.logger.info("An archive was created with the Exposures.")
self.logger.debug("Exposures File: [%s]" % filepath)
return df
def read_exposure_dataframe(self, job_path):
"""Retorna o conteudo do dataframe de exposições.
Arguments:
job_path {str} -- Path onde o job está sendo executado. normalmente Model job['path']
Returns:
pandas.Dataframe -- Retorna um dataframe com as exposições.
"""
filepath = self.get_exposures_filepath(job_path)
df = pd.read_csv(filepath, delimiter=';')
return df
def get_request_dataframe_filepath(self, job_path):
"""Retorna o filepath do arquivo requests.csv
Arguments:
job_path {str} -- Path onde o job está sendo executado. normalmente Model job['path']
Returns:
str -- Filepath for job_path/requests.csv
"""
filepath = os.path.join(job_path, "requests.csv")
return filepath
def create_request_dataframe(self, rows, job_path):
"""Cria um dataframe com os dados das requisições feitas ao skybot.
cada linha representa uma exposição.
Arguments:
rows {Array} -- Uma lista de exposições e os dados da requisição ao skybot.
job_path {str} -- Path onde o job está sendo executado. normalmente Model job['path']
Returns:
pandas.Dataframe -- Retorna um dataframe com as requisições..
"""
filepath = self.get_request_dataframe_filepath(job_path)
df = pd.DataFrame(rows, columns=[
'exposure', 'success', 'ticket', 'positions', 'start',
'finish', 'execution_time',
# object/N2DCEX_target.py
import argparse
import os, sys
import os.path as osp
import torchvision
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms
import network, loss
from torch.utils.data import DataLoader
from data_list import ImageList, ImageList_idx
import random, pdb, math, copy
from tqdm import tqdm
from scipy.spatial.distance import cdist
from sklearn.metrics import confusion_matrix
from numpy import linalg as LA
from torch.nn import functional as F
from matplotlib import pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
from sklearn.manifold import TSNE
def op_copy(optimizer):
for param_group in optimizer.param_groups:
param_group['lr0'] = param_group['lr']
return optimizer
def lr_scheduler(optimizer, iter_num, max_iter, gamma=10, power=0.75):
decay = (1 + gamma * iter_num / max_iter) ** (-power)
for param_group in optimizer.param_groups:
param_group['lr'] = param_group['lr0'] * decay
param_group['weight_decay'] = 1e-3
param_group['momentum'] = 0.9
param_group['nesterov'] = True
return optimizer
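# Sketch of the schedule above: with the default gamma=10 and power=0.75, decay is
# 1.0 at iter_num=0 and (1 + 10)**(-0.75) ~= 0.17 at iter_num=max_iter, so each
# parameter group's lr shrinks smoothly to roughly 17% of its initial 'lr0'.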
def image_train(resize_size=256, crop_size=224, alexnet=False):
if not alexnet:
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
else:
normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')
return transforms.Compose([
transforms.Resize((resize_size, resize_size)),
transforms.RandomCrop(crop_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
])
def image_test(resize_size=256, crop_size=224, alexnet=False):
if not alexnet:
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
else:
normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')
return transforms.Compose([
transforms.Resize((resize_size, resize_size)),
transforms.CenterCrop(crop_size),
transforms.ToTensor(),
normalize
])
def data_load(args):
## prepare data
dsets = {}
dset_loaders = {}
train_bs = args.batch_size
txt_tar = open(args.t_dset_path).readlines()
txt_test = open(args.test_dset_path).readlines()
if not args.da == 'uda':
label_map_s = {}
for i in range(len(args.src_classes)):
label_map_s[args.src_classes[i]] = i
new_tar = []
for i in range(len(txt_tar)):
rec = txt_tar[i]
reci = rec.strip().split(' ')
if int(reci[1]) in args.tar_classes:
if int(reci[1]) in args.src_classes:
line = reci[0] + ' ' + str(label_map_s[int(reci[1])]) + '\n'
new_tar.append(line)
else:
line = reci[0] + ' ' + str(len(label_map_s)) + '\n'
new_tar.append(line)
txt_tar = new_tar.copy()
txt_test = txt_tar.copy()
dsets["target"] = ImageList_idx(txt_tar, transform=image_train())
dset_loaders["target"] = DataLoader(dsets["target"], batch_size=train_bs, shuffle=True, num_workers=args.worker, drop_last=False)
dsets["test"] = ImageList_idx(txt_test, transform=image_test())
dset_loaders["test"] = DataLoader(dsets["test"], batch_size=train_bs*3, shuffle=False, num_workers=args.worker, drop_last=False)
return dset_loaders
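# Note on data_load above: for non-'uda' settings, target labels are remapped so that
# classes shared with the source keep their source indices, while every other target
# class is collapsed into a single extra index equal to len(label_map_s).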
def cal_acc(loader, netF, netB, netC, flag=False):
start_test = True
with torch.no_grad():
iter_test = iter(loader)
for i in range(len(loader)):
data = iter_test.next()
inputs = data[0]
labels = data[1]
inputs = inputs.cuda()
outputs = netC(netB(netF(inputs)))
if start_test:
all_output = outputs.float().cpu()
all_label = labels.float()
start_test = False
else:
all_output = torch.cat((all_output, outputs.float().cpu()), 0)
all_label = torch.cat((all_label, labels.float()), 0)
_, predict = torch.max(all_output, 1)
accuracy = torch.sum(torch.squeeze(predict).float() == all_label).item() / float(all_label.size()[0])
mean_ent = torch.mean(loss.Entropy(nn.Softmax(dim=1)(all_output))).cpu().data.item()
if flag:
matrix = confusion_matrix(all_label, torch.squeeze(predict).float())
acc = matrix.diagonal()/matrix.sum(axis=1) * 100
aacc = acc.mean()
aa = [str(np.round(i, 2)) for i in acc]
acc = ' '.join(aa)
return aacc, acc
else:
return accuracy*100, mean_ent
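# cal_acc above returns (mean per-class accuracy, per-class accuracy string) when
# flag=True (the VISDA-C case), and (overall accuracy in percent, mean prediction
# entropy) when flag=False.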
def train_target(args):
DEBUG = 1
dset_loaders = data_load(args)
## set base network
if args.net[0:3] == 'res':
netF = network.ResBase(res_name=args.net).cuda()
elif args.net[0:3] == 'vgg':
netF = network.VGGBase(vgg_name=args.net).cuda()
netB = network.feat_bootleneck(type=args.classifier, feature_dim=netF.in_features, bottleneck_dim=args.bottleneck).cuda()
netC = network.feat_classifier(type=args.layer, class_num = args.class_num, bottleneck_dim=args.bottleneck).cuda()
modelpath = args.output_dir_src + '/source_F.pt'
netF.load_state_dict(torch.load(modelpath))
modelpath = args.output_dir_src + '/source_B.pt'
netB.load_state_dict(torch.load(modelpath))
modelpath = args.output_dir_src + '/source_C.pt'
netC.load_state_dict(torch.load(modelpath))
netC.eval()
for k, v in netC.named_parameters():
v.requires_grad = False
param_group = []
for k, v in netF.named_parameters():
if args.lr_decay1 > 0:
param_group += [{'params': v, 'lr': args.lr * args.lr_decay1}]
else:
v.requires_grad = False
for k, v in netB.named_parameters():
if args.lr_decay2 > 0:
param_group += [{'params': v, 'lr': args.lr * args.lr_decay2}]
else:
v.requires_grad = False
optimizer = optim.SGD(param_group)
optimizer = op_copy(optimizer)
max_iter = args.max_epoch * len(dset_loaders["target"])
interval_iter = max_iter // args.interval
iter_num = 0
iter_num_update = 0
while iter_num < max_iter:
try:
inputs_test, _, tar_idx = iter_test.next()
except:
iter_test = iter(dset_loaders["target"])
inputs_test, _, tar_idx = iter_test.next()
if inputs_test.size(0) == 1:
continue
if iter_num % interval_iter == 0 and args.cls_par > 0:
iter_num_update += 1
netF.eval()
netB.eval()
mem_label, feas_all, label_confi, label_all = obtain_label_ts(dset_loaders['test'], netF, netB, netC, args, iter_num_update)
mem_label = torch.from_numpy(mem_label).cuda()
netF.train()
netB.train()
inputs_test = inputs_test.cuda()
iter_num += 1
lr_scheduler(optimizer, iter_num=iter_num, max_iter=max_iter)
ln = inputs_test.size(0)
alpha = 0.0 * np.random.randn(ln, 1) + 0.5
alpha = torch.from_numpy(alpha)
alpha = alpha.type(torch.FloatTensor)
alpha = alpha.cuda()
features_test_F = netF(inputs_test)
features_test_N, _, _ = obtain_nearest_trace(features_test_F, feas_all, label_confi)
features_test_N = features_test_N.cuda()
features_test_F = netB(features_test_F)
outputs_test_F = netC(features_test_F)
features_test_N = netB(features_test_N)
outputs_test_N = netC(features_test_N)
if args.cls_par > 0:
pred = mem_label[tar_idx]
classifier_loss_F = nn.CrossEntropyLoss()(outputs_test_F, pred)
classifier_loss_N = nn.CrossEntropyLoss()(outputs_test_N, pred)
classifier_loss = 1.0*classifier_loss_F + 1.0*classifier_loss_N
classifier_loss *= args.cls_par
if iter_num < interval_iter and args.dset == "VISDA-C":
classifier_loss *= 0
else:
classifier_loss = torch.tensor(0.0).cuda()
if args.ent:
softmax_out_F = nn.Softmax(dim=1)(outputs_test_F)
softmax_out_N = nn.Softmax(dim=1)(outputs_test_N)
softmax_out = 1.0*softmax_out_F + 1.0*softmax_out_N
entropy_loss = torch.mean(loss.Entropy(softmax_out))
if args.gent:
msoftmax = softmax_out.mean(dim=0)
gentropy_loss = torch.sum(-msoftmax * torch.log(msoftmax + args.epsilon))
entropy_loss -= gentropy_loss
im_loss = entropy_loss * args.ent_par
classifier_loss += im_loss
optimizer.zero_grad()
classifier_loss.backward()
optimizer.step()
if iter_num % interval_iter == 0 or iter_num == max_iter:
netF.eval()
netB.eval()
if args.dset=='VISDA-C':
acc_s_te, acc_list = cal_acc(dset_loaders['test'], netF, netB, netC, True)
log_str = 'Task: {}, Iter:{}/{}; Accuracy = {:.2f}%'.format(args.name, iter_num, max_iter, acc_s_te) + '\n' + acc_list
else:
acc_s_te, _ = cal_acc(dset_loaders['test'], netF, netB, netC, False)
log_str = 'Task: {}, Iter:{}/{}; Accuracy = {:.2f}%'.format(args.name, iter_num, max_iter, acc_s_te)
args.out_file.write(log_str + '\n')
args.out_file.flush()
print(log_str+'\n')
netF.train()
netB.train()
if args.issave:
torch.save(netF.state_dict(), osp.join(args.output_dir, "target_F_" + args.savename + ".pt"))
torch.save(netB.state_dict(), osp.join(args.output_dir, "target_B_" + args.savename + ".pt"))
torch.save(netC.state_dict(), osp.join(args.output_dir, "target_C_" + args.savename + ".pt"))
return netF, netB, netC
def print_args(args):
s = "==========================================\n"
for arg, content in args.__dict__.items():
s += "{}:{}\n".format(arg, content)
return s
def obtain_label_ts(loader, netF, netB, netC, args, iter_num_update_f):
start_test = True
with torch.no_grad():
iter_test = iter(loader)
for _ in range(len(loader)):
data = iter_test.next()
inputs = data[0]
labels = data[1]
inputs = inputs.cuda()
feas_F = netF(inputs)
feas = netB(feas_F)
outputs = netC(feas)
if start_test:
all_fea_F = feas_F.float().cpu()
all_fea = feas.float().cpu()
all_output = outputs.float().cpu()
all_label = labels.float()
start_test = False
else:
all_fea_F = torch.cat((all_fea_F, feas_F.float().cpu()), 0)
all_fea = torch.cat((all_fea, feas.float().cpu()), 0)
all_output = torch.cat((all_output, outputs.float().cpu()), 0)
all_label = torch.cat((all_label, labels.float()), 0)
all_output = nn.Softmax(dim=1)(all_output)
ent = torch.sum(-all_output * torch.log(all_output + args.epsilon), dim=1)
unknown_weight = 1 - ent / np.log(args.class_num)
_, predict = torch.max(all_output, 1)
len_unconfi = int(ent.shape[0]*0.5)
idx_unconfi = ent.topk(len_unconfi, largest=True)[-1]
idx_unconfi_list_ent = idx_unconfi.cpu().numpy().tolist()
accuracy = torch.sum(torch.squeeze(predict).float() == all_label).item() / float(all_label.size()[0])
if args.distance == 'cosine':
all_fea = torch.cat((all_fea, torch.ones(all_fea.size(0), 1)), 1)
all_fea = (all_fea.t() / torch.norm(all_fea, p=2, dim=1)).t()
all_fea = all_fea.float().cpu().numpy()
K = all_output.size(1)
aff = all_output.float().cpu().numpy()
initc = aff.transpose().dot(all_fea)
initc = initc / (1e-8 + aff.sum(axis=0)[:,None])
cls_count = np.eye(K)[predict].sum(axis=0)
labelset = np.where(cls_count>args.threshold)
labelset = labelset[0]
dd = cdist(all_fea, initc[labelset], args.distance)
pred_label = dd.argmin(axis=1)
pred_label = labelset[pred_label]
#--------------------use dd to get confi_idx and unconfi_idx-------------
dd_min = dd.min(axis = 1)
dd_min_tsr = torch.from_numpy(dd_min).detach()
dd_t_confi = dd_min_tsr.topk(int((dd.shape[0] * 0.5)), largest = False)[-1]
dd_confi_list = dd_t_confi.cpu().numpy().tolist()
dd_confi_list.sort()
idx_confi = dd_confi_list
idx_all_arr = np.zeros(shape = dd.shape[0], dtype = np.int64)
# idx_all_list = list(idx_all_arr)
idx_all_arr[idx_confi] = 1
idx_unconfi_arr = np.where(idx_all_arr == 0)
idx_unconfi_list_dd = list(idx_unconfi_arr[0])
idx_unconfi_list = list(set(idx_unconfi_list_dd).intersection(set(idx_unconfi_list_ent)))
#------------------------------------------------------------------------
label_confi = np.ones(ent.shape[0], dtype="int64")
label_confi[idx_unconfi_list] = 0
_, all_idx_nn, _ = obtain_nearest_trace(all_fea_F, all_fea_F, label_confi)
ln = label_confi.shape[0]
gamma = 0.15 * np.random.randn(ln, 1) + 0.85
for round in range(1):
aff = np.eye(K)[pred_label]
initc = aff.transpose().dot(all_fea)
initc = initc / (1e-8 + aff.sum(axis=0)[:,None])
dd_sf = cdist(all_fea, initc[labelset], args.distance)
dd_nn = dd_sf[all_idx_nn]
dd = gamma*dd_sf + (1-gamma)*dd_nn
pred_label = dd.argmin(axis=1)
pred_label = labelset[pred_label]
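# Note on the blend above: dd_sf holds each sample's own distances to the class
# centroids, dd_nn the distances of its traced (confident) neighbour, and the
# per-sample gamma drawn around 0.85 weights the two before pseudo-labels are
# re-assigned by nearest centroid.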
acc = np.sum(pred_label == all_label.float().numpy()) / len(all_fea)
log_str = 'Accuracy = {:.2f}% -> {:.2f}%'.format(accuracy * 100, acc * 100)
args.out_file.write(log_str + '\n')
args.out_file.flush()
print(log_str+'\n')
return pred_label.astype('int'), all_fea_F, label_confi, all_label
def obtain_nearest_trace(data_q, data_all, lab_confi):
data_q_ = data_q.detach()
data_all_ = data_all.detach()
data_q_ = data_q_.cpu().numpy()
data_all_ = data_all_.cpu().numpy()
num_sam = data_q.shape[0]
LN_MEM = 70
flag_is_done = 0 # indicates whether the trace process has finished over the target dataset
ctr_oper = 0 # counts the number of trace operations performed
idx_left = np.arange(0, num_sam, 1)
mtx_mem_rlt = -3*np.ones((num_sam, LN_MEM), dtype='int64')
mtx_mem_ignore = np.zeros((num_sam, LN_MEM), dtype='int64')
is_mem = 0
mtx_log = np.zeros((num_sam, LN_MEM), dtype='int64')
indices_row = np.arange(0, num_sam, 1)
flag_sw_bad = 0
nearest_idx_last = np.array([-7])
while flag_is_done == 0:
nearest_idx_tmp, idx_last_tmp = get_nearest_sam_idx(data_q_, data_all_, is_mem, ctr_oper, mtx_mem_ignore, nearest_idx_last)
is_mem = 1
nearest_idx_last = nearest_idx_tmp
if ctr_oper == (LN_MEM-1):
flag_sw_bad = 1
else:
flag_sw_bad = 0
mtx_mem_rlt[:, ctr_oper] = nearest_idx_tmp
mtx_mem_ignore[:, ctr_oper] = idx_last_tmp
lab_confi_tmp = lab_confi[nearest_idx_tmp]
idx_done_tmp = np.where(lab_confi_tmp == 1)[0]
idx_left[idx_done_tmp] = -1
if flag_sw_bad == 1:
idx_bad = np.where(idx_left >=
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "<NAME> <<EMAIL>>"
import sys
import random
import math as np
import pygame
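# Note: the standard-library math module is imported under the alias 'np' above;
# only np.sqrt appears to be used below, so there is no NumPy dependency.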
class PolyObstacle:
"""
Object that represents an obstacle described
by a series of points (in the node list) which make up a
set of lines. These lines represent the exterior of the obstacle.
"""
def __init__(self, _nodes, _screen, **kwargs):
"""
Creates a PolyObstacle instance and initializes certain global
variables
@param _nodes A list of nodes used to represent the vertices
of the polygon
@param _screen The PyGame screen that is used to draw the obstacle
"""
## A list of nodes used to represent the vertices
self.nodes = _nodes
## A dictionary of colors defined in pygame
self.colors = pygame.color.THECOLORS
## The PyGame screen that is used to draw the obstacle
self.screen = _screen
## Boundaries of the simulation
self.boundary = (_screen.get_width(), _screen.get_height())
## Defines whether the obstacle is dynamic or not
self.dynamic = kwargs.get("dynamic", False)
## Velocity of the obstacle
self.velocity = [3, 0]
## The displacement of the obstacle
self.displacement = 0
## Max displacement allowed
self.max_displacement = 60
## List of static obstacles
self.obstacles = list()
## Start point
self.start_point = kwargs.get("start_point", None)
## End point
self.end_point = kwargs.get("end_point", None)
self.estimatePoly()
def removeSelfFromObstacleList(self):
"""
Removes self from obstacle list
"""
for obst in self.obstacles:
if id(self) == id(obst):
self.obstacles.remove(obst)
def norm(self, p1, p2):
"""
Gets the Euclidean distance between p1 and p2
@param p1, p2 Points in space
@return The distance between p1 and p2
"""
return np.sqrt(pow(p1[0] - p2[0], 2) + pow(p1[1] - p2[1], 2))
def estimatePoly(self):
"""
Tries to estimate the polygon as a circle (very useful for environments
with many obstacles, e.g. a random field of obstacles)
"""
## The average point in the polygon. Represents the
## center of the enclosing circle
self.avgPoint = map(
lambda p: p / len(self.nodes),
reduce(
lambda p1, p2: (
p1[0] + p2[0],
p1[1] + p2[1]
),
self.nodes
)
)
## The maximum distance from any vertex and the average point
self.maxDist = max(
[
self.norm(
p,
self.avgPoint
) for p in self.nodes
]
)
def detectCollision(self, pStart, pEnd):
"""
Detects if there is a collision between the obstacle and
the line <pStart, pEnd>
@param pStart The starting point of the line
@param pEnd The ending point of the line
@return A boolean value representing if a collision occurred
"""
interCross = lambda p1, p2, q1, q2: (
(
(p1[0] - p2[0]) *
(q1[1] - p1[1]) -
(p1[1] - p2[1]) *
(q1[0] - p1[0])
) *
(
(p1[0] - p2[0]) *
(q2[1] - p1[1]) -
(p1[1] - p2[1]) *
(q2[0] - p1[0])
)
) < 0
interCheck = lambda p1, p2, q1, q2: (
max(p1[0], p2[0]) >= min(q1[0], q2[0]) and
max(q1[0], q2[0]) >= min(p1[0], p2[0]) and
max(p1[1], p2[1]) >= min(q1[1], q2[1]) and
max(q1[1], q2[1]) >= min(p1[1], p2[1])
)
vecList = [[self.nodes[0], self.nodes[-1]]]
for k in range(len(self.nodes) - 1):
vecList += [
[
self.nodes[k],
self.nodes[k + 1]
]
]
return any(
map(
lambda p: (
interCross(
p[0],
p[1],
pStart,
pEnd
) and
interCheck(
p[0],
p[1],
pStart,
pEnd
)
),
vecList
)
)
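# Note on the lambdas above: interCross uses the sign of two cross products to test
# whether q1 and q2 lie on opposite sides of the line through p1-p2, and interCheck
# is a bounding-box overlap test; an edge is reported as colliding only when both
# conditions hold.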
def getClosestPoint(self, a, b, p):
"""
Gets the closest point on line <a, b> to point p
@param a The starting point on the line
@param b The ending point of the line
@param p The point in which the closest distance will be checked
@return The closest point on line <a, b> to point p
"""
#pygame.draw.line(self.screen, self.colors["orange"], a, b, 4)
a = map(float, a)
b = map(float, b)
p = map(float, p)
xGreater = lambda r: r[0] >= max(a[0], b[0])
xLess = lambda r: r[0] <= min(a[0], b[0])
yGreater = lambda r: r[1] >= max(a[1], b[1])
yLess = lambda r: r[1] <= min(a[1], b[1])
if (
(xGreater(p) or xLess(p)) and
(yGreater(p) or yLess(p))
):
if self.norm(a, p) < self.norm(b, p):
return a
else:
return b
else:
#"""
a_to_p = [
float(p[0] - a[0]),
float(p[1] - a[1])
]
a_to_b = [
float(b[0] - a[0]),
float(b[1] - a[1])
]
atb2 = a_to_b[0] ** 2 + a_to_b[1] ** 2
atp_dot_atb = a_to_p[0] * a_to_b[0] + a_to_p[1] * a_to_b[1]
t = float(atp_dot_atb) / float(atb2)
retVal = (
float(a[0]) + a_to_b[0] * t,
float(a[1]) + a_to_b[1] * t
)
# This is unicorn magic, just freaking deal with it
if (
(xGreater(retVal) or xLess(retVal)) and
(yGreater(retVal) or yLess(retVal))
):
if self.norm(a, retVal) < self.norm(b, retVal):
return a
else:
return b
return retVal
#"""
#lam = -(a[0] * p[0] + a[1] * p[1]) / (p[0] * (b[0] - a[0]) + p[1] * (b[1] - a[1]))
#xk = (b[0] - a[0]) * lam + a[0]
#yk = (b[1] - a[1]) * lam + a[1]
#return (xk, yk)
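# Note on getClosestPoint above: t is the scalar projection of (p - a) onto (b - a)
# divided by |b - a|**2, so a + t*(b - a) is the orthogonal projection of p onto the
# infinite line; the follow-up range check snaps that projection to the nearer
# endpoint whenever it falls outside the segment a-b.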
def rayintersectseg(self, p, edge):
"""
Determines if a ray cast from point p intersects with the given edge.
Used to determine if a point p is inside the polygon
@param p The point to be checked
@param edge The edge that will be checked
@return True if a ray from point p intersects with edge and false
otherwise
"""
_eps = 0.00001
_huge = sys.float_info.max
_tiny = sys.float_info.min
a, b = edge
if a[1] > b[1]:
a, b = b, a
if p[1] == a[1] or p[1] == b[1]:
p = (p[0], p[1] + _eps)
intersect = False
if (p[1] > b[1] or p[1] < a[1]) or (p[0] > max(a[0], b[0])):
return False
if p[0] < min(a[0], b[0]):
intersect = True
else:
if abs(a[0] - b[0]) > _tiny:
m_red = (b[1] - a[1]) / float(b[0] - a[0])
else:
m_red = _huge
if abs(a[0] - p[0]) > _tiny:
m_blue = (p[1] - a[1]) / float(p[0] - a[0])
else:
m_blue = _huge
intersect = m_blue >= m_red
return intersect
def _odd(self, x):
"""
Determines if an integer, x, is odd
@param x The integer to be checked
@return True if x is odd, false otherwise
"""
return x % 2 == 1
def pointInPoly(self, p):
"""
Determines if a point p is inside the polygon represented
by this PolyObstacle object. It does this by checking the
number ray intersections that occur is odd or even. If the number
is odd, the point is inside the polygon, otherwise it is not.
@param p The point to be checked
@return True if the point is in the polygon and false otherwise
"""
vecList = [[self.nodes[0], self.nodes[-1]]]
for k in range(len(self.nodes) - 1):
vecList += [[self.nodes[k], self.nodes[k+1]]]
return self._odd(
sum(
self.rayintersectseg(p, edge) for edge in vecList
)
)
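# Minimal usage sketch (hypothetical values): for a square obstacle with nodes
# [(0, 0), (10, 0), (10, 10), (0, 10)], pointInPoly((5, 5)) crosses an odd number
# of edges and returns True, while pointInPoly((20, 5)) crosses none and returns False.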
def pointAllowed(self, b, p):
"""
Checks if a point is allowed, meaning no collisions occur
@param b The boid object that will be checked
@param p The point that will be checked
@return True if allowed, false otherwise
"""
return (
(
self.norm(
self.getPoint(p),
p
) > b.radius
) and
(
not self.pointInPoly(p)
)
)
def getPoint(self, p):
"""
Gets the closest point from the polygon to p
@param p The point to be checked
@return The closest point that lies on the polygon exterior
to p
"""
vecList = list() # [[self.nodes[0],self.nodes[-1]]]
for k in range(-1, len(self.nodes) - 1):
vecList += [[self.nodes[k], self.nodes[k+1]]]
#print vecList
cpList = map(
lambda v: self.getClosestPoint(v[0], v[1], p),
vecList
)
dList = map(
lambda pv: self.norm(p, pv),
cpList
)
retVal = [
cpList[i] for i, j in enumerate(dList) if j == min(dList)
][0]
#pygame.draw.circle(self.screen, self.colors["green"], map(int, retVal), 5)
return retVal
def getRadius(self):
"""
Gets the 'radius' of the checking point. Only used for
conformity with circle obstacles that have not been included
in this repository
@return 1
"""
return 1
def checkCollisionWithOtherObstacles(self, node):
"""
Check to see if there is a collision with a static obstacle
"""
# check for every static obstacle's nodes
for obstacle in self.obstacles:
if obstacle.pointInPoly(node):
return obstacle
if self.norm(node, obstacle.getPoint(node)) <= 10:
return obstacle
return None
| |
self.t)
X0 += 0.00000000007 * math.cos(1.59729628741 + 29396.6128544381 * self.t)
X0 += 0.00000000007 * math.cos(4.06632874380 + 22779.6810636773 * self.t)
X0 += 0.00000000009 * math.cos(3.00366222190 + 64741.71416564769 * self.t)
X0 += 0.00000000007 * math.cos(5.95614295602 + 26290.4003542318 * self.t)
X0 += 0.00000000007 * math.cos(5.99066738237 + 25885.89356388359 * self.t)
X0 += 0.00000000009 * math.cos(3.58307674710 + 25863.31452838879 * self.t)
X0 += 0.00000000007 * math.cos(3.41090615216 + 13655.6166736929 * self.t)
X0 += 0.00000000007 * math.cos(3.03792135842 + 156740.96176236669 * self.t)
X0 += 0.00000000008 * math.cos(4.55619991306 + 78571.51086317569 * self.t)
X0 += 0.00000000007 * math.cos(3.14378763001 + 52663.6848518861 * self.t)
X0 += 0.00000000007 * math.cos(1.56777604890 + 2118.5200428949 * self.t)
X0 += 0.00000000008 * math.cos(4.91321996165 + 102872.98980971589 * self.t)
X0 += 0.00000000010 * math.cos(3.40349349020 + 2703.3723371921 * self.t)
X0 += 0.00000000007 * math.cos(0.43580348099 + 52074.08758236629 * self.t)
X0 += 0.00000000007 * math.cos(2.04943751114 + 209659.06869168568 * self.t)
X0 += 0.00000000007 * math.cos(3.67671851452 + 149846.43781352908 * self.t)
X0 += 0.00000000007 * math.cos(4.30875978873 + 77417.87040745489 * self.t)
X0 += 0.00000000009 * math.cos(1.15423490298 + 56260.03860733629 * self.t)
X0 += 0.00000000007 * math.cos(3.50070425163 + 117892.78995130445 * self.t)
X0 += 0.00000000009 * math.cos(2.88385440366 + 18073.9487561337 * self.t)
X0 += 0.00000000007 * math.cos(4.35051921098 + 917.6864684983 * self.t)
X0 += 0.00000000008 * math.cos(2.28326188901 + 32808.8609383991 * self.t)
X0 += 0.00000000008 * math.cos(1.76307802021 + 207114.39605479328 * self.t)
X0 += 0.00000000008 * math.cos(5.32581127623 + 3308.22207789689 * self.t)
X0 += 0.00000000007 * math.cos(2.91658615206 + 23267.3158149315 * self.t)
X0 += 0.00000000007 * math.cos(2.74703887915 + 28908.9781031839 * self.t)
X0 += 0.00000000007 * math.cos(3.11128439659 + 37.88921815429 * self.t)
X0 += 0.00000000007 * math.cos(6.13048599151 + 19202.50943415989 * self.t)
X0 += 0.00000000007 * math.cos(5.70115634056 + 52145.72323962749 * self.t)
X0 += 0.00000000008 * math.cos(4.37424371597 + 103395.56722780968 * self.t)
X0 += 0.00000000007 * math.cos(1.57841023127 + 208276.87075920108 * self.t)
X0 += 0.00000000009 * math.cos(1.81069664930 + 2014.73785431429 * self.t)
X0 += 0.00000000007 * math.cos(1.08271855123 + 63786.6020590061 * self.t)
X0 += 0.00000000007 * math.cos(2.83741634124 + 24712.3731592119 * self.t)
X0 += 0.00000000007 * math.cos(2.64521183634 + 127099.14709865468 * self.t)
X0 += 0.00000000008 * math.cos(0.10378895366 + 120417.92224525509 * self.t)
X0 += 0.00000000008 * math.cos(2.96158619363 + 103815.05187168489 * self.t)
X0 += 0.00000000007 * math.cos(0.99819510291 + 8989.22388794889 * self.t)
X0 += 0.00000000008 * math.cos(1.62417504708 + 579.4437695153 * self.t)
X0 += 0.00000000007 * math.cos(2.35955703224 + 1135.4330544097 * self.t)
X0 += 0.00000000006 * math.cos(5.07797134858 + 27574.1270801229 * self.t)
X0 += 0.00000000007 * math.cos(5.91237630109 + 102976.08258393449 * self.t)
X0 += 0.00000000006 * math.cos(3.83307059240 + 28774.8690535989 * self.t)
X0 += 0.00000000006 * math.cos(1.83055443881 + 23401.4248645165 * self.t)
X0 += 0.00000000006 * math.cos(1.38927552842 + 80463.04058549309 * self.t)
X0 += 0.00000000008 * math.cos(1.96535036231 + 51258.1198146501 * self.t)
X0 += 0.00000000006 * math.cos(3.61870647781 + 23704.95394428149 * self.t)
X0 += 0.00000000006 * math.cos(2.04491855340 + 28471.3399738339 * self.t)
X0 += 0.00000000006 * math.cos(1.68248146387 + 55618.1374106303 * self.t)
X0 += 0.00000000007 * math.cos(1.61837202878 + 53311.7269725251 * self.t)
X0 += 0.00000000007 * math.cos(5.53141035478 + 1802.6158082053 * self.t)
X0 += 0.00000000008 * math.cos(4.68334848992 + 102018.65999090829 * self.t)
X0 += 0.00000000007 * math.cos(5.32851714437 + 79995.0755951387 * self.t)
X0 += 0.00000000007 * math.cos(5.04612545544 + 52278.0126188975 * self.t)
X0 += 0.00000000008 * math.cos(4.88698976103 + 1162.23088692429 * self.t)
X0 += 0.00000000006 * math.cos(6.06855779639 + 53439.2074632101 * self.t)
X0 += 0.00000000007 * math.cos(1.80548735062 + 29580.7185259273 * self.t)
X0 += 0.00000000007 * math.cos(3.67914527744 + 27388.97377726269 * self.t)
X0 += 0.00000000007 * math.cos(1.98447975377 + 24787.3201408527 * self.t)
X0 += 0.00000000006 * math.cos(3.77150246945 + 106470.62024415868 * self.t)
X0 += 0.00000000007 * math.cos(2.88869753965 + 104127.51158807839 * self.t)
X0 += 0.00000000007 * math.cos(0.34091991598 + 27624.1237020271 * self.t)
X0 += 0.00000000007 * math.cos(5.32270511523 + 24552.1702160883 * self.t)
X0 += 0.00000000008 * math.cos(3.83070609952 + 948.9317914863 * self.t)
X0 += 0.00000000007 * math.cos(5.94865065189 + 6680.98103591609 * self.t)
X0 += 0.00000000008 * math.cos(0.77567524248 + 51861.86392076229 * self.t)
X0 += 0.00000000008 * math.cos(3.58552572007 + 51554.3062997927 * self.t)
X0 += 0.00000000007 * math.cos(4.23028089495 + 26720.44306340389 * self.t)
X0 += 0.00000000007 * math.cos(2.84651949085 + 52214.1831362697 * self.t)
X0 += 0.00000000007 * math.cos(5.58797516267 + 25572.68308796469 * self.t)
X0 += 0.00000000007 * math.cos(0.07564986854 + 26603.6108301507 * self.t)
X0 += 0.00000000007 * math.cos(5.38283352699 + 415.7963080956 * self.t)
X0 += 0.00000000008 * math.cos(1.12257158980 + 54191.0317724297 * self.t)
X0 += 0.00000000006 * math.cos(0.30285518092 + 24505.69997580769 * self.t)
X0 += 0.00000000007 * math.cos(4.29992052709 + 2974.85377209451 * self.t)
X0 += 0.00000000007 * math.cos(4.30362880096 + 48092.0615939275 * self.t)
X0 += 0.00000000006 * math.cos(3.84286985256 + 78043.54059976728 * self.t)
X0 += 0.00000000006 * math.cos(2.33701938638 + 19202.99706912689 * self.t)
X0 += 0.00000000006 * math.cos(1.70009289223 + 53661.3408073351 * self.t)
X0 += 0.00000000006 * math.cos(0.21405293391 + 104426.63798234759 * self.t)
X0 += 0.00000000006 * math.cos(0.26081525757 + 27883.4054027787 * self.t)
X0 += 0.00000000006 * math.cos(3.09307209285 + 156523.78133157069 * self.t)
X0 += 0.00000000008 * math.cos(5.86283230643 + 51120.60032370579 * self.t)
X0 += 0.00000000006 * math.cos(1.52305102135 + 226.07308589371 * self.t)
X0 += 0.00000000008 * math.cos(0.51879015762 + 48997.9043100643 * self.t)
X0 += 0.00000000006 * math.cos(4.68111037881 + 149756.95206568309 * self.t)
X0 += 0.00000000007 * math.cos(1.82650871102 + 4083.74468922089 * self.t)
X0 += 0.00000000008 * math.cos(3.80478888311 + 53259.1303329255 * self.t)
X0 += 0.00000000006 * math.cos(5.07036834716 + 130006.04778747768 * self.t)
X0 += 0.00000000007 * math.cos(1.36856874001 + 162810.73851692009 * self.t)
X0 += 0.00000000006 * math.cos(2.21234472657 + 27441.8957040745 * self.t)
X0 += 0.00000000006 * math.cos(3.45128030464 + 24734.3982140409 * self.t)
X0 += 0.00000000007 * math.cos(4.80045013467 + 55503.69812194508 * self.t)
X0 += 0.00000000008 * math.cos(3.49135266112 + 422.9098550964 * self.t)
X0 += 0.00000000006 * math.cos(0.40283917391 + 76777.97312114088 * self.t)
X0 += 0.00000000006 * math.cos(5.52356103113 + 155461.16718973868 * self.t)
X0 += 0.00000000006 * math.cos(4.38638116269 + 50161.0684288341 * self.t)
X0 += 0.00000000006 * math.cos(5.35041562795 + 78262.46876949779 * self.t)
X0 += 0.00000000006 * math.cos(3.67530335628 + 223.1041404771 * self.t)
X0 += 0.00000000006 * math.cos(3.76321444127 + 78225.82020656829 * self.t)
X0 += 0.00000000006 * math.cos(5.36628440371 + 76468.69479848508 * self.t)
X0 += 0.00000000006 * math.cos(4.67707783228 + 78597.8091829755 * self.t)
X0 += 0.00000000007 * math.cos(1.16259543363 + 30110.4094910219 * self.t)
X0 += 0.00000000006 * math.cos(3.49467754329 + 35077.61466449009 * self.t)
X0 += 0.00000000007 * math.cos(1.41920132868 + 1884.65734593389 * self.t)
X0 += 0.00000000007 * math.cos(4.54853659672 + 1082.8364148101 * self.t)
X0 += 0.00000000006 * math.cos(1.96577053321 + 5193.12324131429 * self.t)
X0 += 0.00000000006 * math.cos(5.17864942773 + 171292.90171019848 * self.t)
X0 += 0.00000000006 * math.cos(2.86805602516 + 25984.56654987209 * self.t)
X0 += 0.00000000006 * math.cos(4.20102757189 + 29864.5778447925 * self.t)
X0 += 0.00000000006 * math.cos(1.46259745932 + 22311.7160733229 * self.t)
X0 += 0.00000000006 * math.cos(4.38736983699 + 76991.27221657889 * self.t)
X0 += 0.00000000005 * math.cos(0.68949263605 + 27410.8246065891 * self.t)
X0 += 0.00000000005 * math.cos(4.97413239516 + 24765.4693115263 * self.t)
X0 += 0.00000000006 * math.cos(1.69366740663 + 51092.9698683383 * self.t)
X0 += 0.00000000007 * math.cos(3.84496680361 + 88476.75115345008 * self.t)
X0 += 0.00000000007 * math.cos(2.78025711521 + 34102.3451619817 * self.t)
X0 += 0.00000000005 * math.cos(1.26084797472 + 2686.4782770577 * self.t)
X0 += 0.00000000006 * math.cos(5.55737889155 + 31903.2578186947 * self.t)
X0 += 0.00000000006 * math.cos(0.10624613966 + 20273.0360994207 * self.t)
X0 += 0.00000000006 * math.cos(2.86083846875 + 104874.43380187408 * self.t)
X0 += 0.00000000005 * math.cos(2.16809698297 + 155475.39428374029 * self.t)
X0 += 0.00000000005 * math.cos(6.14186631974 + 128747.59385585209 * self.t)
X0 += 0.00000000005 * math.cos(5.05100044910 + 26500.51805593209 * self.t)
X0 += 0.00000000005 * math.cos(0.61262458211 + 25675.7758621833 * self.t)
X0 += 0.00000000007 * math.cos(4.13768447366 + 25234.46294233869 * self.t)
X0 += 0.00000000005 * math.cos(5.69197138572 + 53551.8239004777 * self.t)
X0 += 0.00000000005 * math.cos(0.34616736613 + 60170.5785426561 * self.t)
X0 += 0.00000000005 * math.cos(6.05655614068 + 27353.71443768409 * self.t)
X0 += 0.00000000007 * math.cos(1.18001608430 + 52602.16065654089 * self.t)
X0 += 0.00000000005 * math.cos(2.35031471718 + 128106.07549751546 * self.t)
X0 += 0.00000000005 * math.cos(2.02403333135 + 629.0954035336 * self.t)
X0 += 0.00000000005 * math.cos(1.31146516422 + 35191.56631820829 * self.t)
X0 += 0.00000000006 * math.cos(2.18238318065 + 3492.8153843531 * self.t)
X0 += 0.00000000007 * math.cos(4.81830760175 + 169093.81436691148 * self.t)
X0 += 0.00000000006 * math.cos(4.71794828628 + 81591.60126351927 * self.t)
X0 += 0.00000000006 * math.cos(4.84978213740 + 25462.4767667453 * self.t)
X0 += 0.00000000006 * math.cos(0.81384289381 + 26713.81715137009 * self.t)
X0 += 0.00000000007 * math.cos(2.97261820220 + 2641.5850959557 * self.t)
X0 += 0.00000000005 * math.cos(3.79651203289 + 58220.2786820073 * self.t)
X0 += 0.00000000005 * math.cos(2.70097411082 + 52813.0463726561 * self.t)
X0 += 0.00000000006 * math.cos(1.52077024889 + 156531.54400228668 * self.t)
X0
# safetypy/safetypy.py
# coding=utf-8
# Author: SafetyCulture
# Copyright: © SafetyCulture 2016
# pylint: disable=E1101
import collections
import json
import logging
import os
import re
import sys
import time
import errno
from builtins import input
from datetime import datetime
import requests
from getpass import getpass
DEFAULT_EXPORT_FORMAT = 'PDF'
GUID_PATTERN = '[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}$'
HTTP_USER_AGENT_ID = 'safetyculture-python-sdk'
def get_user_api_token(logger, username, password):
"""
Generate iAuditor API Token
:param logger: the logger
:param username: iAuditor account username
:param password: iAuditor account password
:return: API Token if authenticated else None
"""
generate_token_url = "https://api.safetyculture.io/auth"
payload = "username=" + username + "&password=" + password + "&grant_type=password"
headers = {
'content-type': "application/x-www-form-urlencoded",
'cache-control': "no-cache",
}
response = requests.request("POST", generate_token_url, data=payload, headers=headers)
if response.status_code == requests.codes.ok:
return response.json()['access_token']
else:
logger.error('An error occurred calling ' + generate_token_url + ': ' + str(response.json()))
return None
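# Minimal usage sketch (the credentials and logger name below are placeholders,
# not part of this SDK):
#
#   example_logger = logging.getLogger('sp_logger')
#   token = get_user_api_token(example_logger, 'user@example.com', 'secret')
#   if token is not None:
#       sc_client = SafetyCulture(token)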
class SafetyCulture:
def __init__(self, api_token):
self.current_dir = os.getcwd()
self.log_dir = self.current_dir + '/log/'
self.api_url = 'https://api.safetyculture.io/'
self.audit_url = self.api_url + 'audits'
self.template_search_url = self.api_url + 'templates/search?field=template_id&field=name'
self.response_set_url = self.api_url + 'response_sets'
self.get_my_groups_url = self.api_url + 'share/connections'
self.all_groups_url = self.api_url + 'groups'
self.add_users_url = self.api_url + 'users'
self.create_directory_if_not_exists(self.log_dir)
self.configure_logging()
logger = logging.getLogger('sp_logger')
try:
token_is_valid = re.match('^[<KEY>', api_token)
if token_is_valid:
self.api_token = api_token
else:
logger.error('API token failed to match expected pattern')
self.api_token = None
except Exception as ex:
self.log_critical_error(ex, 'API token is missing or invalid. Exiting.')
exit()
if self.api_token:
self.custom_http_headers = {
'User-Agent': HTTP_USER_AGENT_ID,
'Authorization': 'Bearer ' + self.api_token
}
else:
logger.error('No valid API token parsed! Exiting.')
sys.exit(1)
def authenticated_request_get(self, url):
return requests.get(url, headers=self.custom_http_headers)
def authenticated_request_post(self, url, data):
self.custom_http_headers['content-type'] = 'application/json'
response = requests.post(url, data, headers=self.custom_http_headers)
del self.custom_http_headers['content-type']
return response
def authenticated_request_put(self, url, data):
self.custom_http_headers['content-type'] = 'application/json'
response = requests.put(url, data, headers=self.custom_http_headers)
del self.custom_http_headers['content-type']
return response
def authenticated_request_delete(self, url):
return requests.delete(url, headers=self.custom_http_headers)
@staticmethod
def parse_json(json_to_parse):
"""
Parse JSON string to OrderedDict and return
:param json_to_parse: string representation of JSON
:return: OrderedDict representation of JSON
"""
return json.JSONDecoder(object_pairs_hook=collections.OrderedDict).decode(json_to_parse.decode('utf-8'))
@staticmethod
def log_critical_error(ex, message):
"""
Write exception and description message to log
:param ex: Exception instance to log
:param message: Descriptive message to describe exception
"""
logger = logging.getLogger('sp_logger')
if logger is not None:
logger.critical(message)
logger.critical(ex)
def configure_logging(self):
"""
Configure logging to log to std output as well as to log file
"""
log_level = logging.DEBUG
log_filename = datetime.now().strftime('%Y-%m-%d') + '.log'
sp_logger = logging.getLogger('sp_logger')
sp_logger.setLevel(log_level)
formatter = logging.Formatter('%(asctime)s : %(levelname)s : %(message)s')
fh = logging.FileHandler(filename=self.log_dir + log_filename)
fh.setLevel(log_level)
fh.setFormatter(formatter)
sp_logger.addHandler(fh)
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(log_level)
sh.setFormatter(formatter)
sp_logger.addHandler(sh)
def create_directory_if_not_exists(self, path):
"""
Creates 'path' if it does not exist
If creation fails, an exception will be thrown
:param path: the path to ensure it exists
"""
try:
os.makedirs(path)
except OSError as ex:
if ex.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
self.log_critical_error(ex, 'An error happened trying to create ' + path)
raise
def discover_audits(self, template_id=None, modified_after=None, completed=True):
"""
Return IDs of all completed audits if no parameters are passed, otherwise restrict search
based on parameter values
:param template_id: Restrict discovery to this template_id
:param modified_after: Restrict discovery to audits modified after this UTC timestamp
:param completed: Restrict discovery to audits marked as completed, default to True
:return: JSON object containing IDs of all audits returned by API
"""
logger = logging.getLogger('sp_logger')
last_modified = modified_after if modified_after is not None else '2000-01-01T00:00:00.000Z'
search_url = self.audit_url + '/search?field=audit_id&field=modified_at&order=asc&modified_after=' \
+ last_modified
log_string = '\nInitiating audit_discovery with the parameters: ' + '\n'
log_string += 'template_id = ' + str(template_id) + '\n'
log_string += 'modified_after = ' + str(last_modified) + '\n'
log_string += 'completed = ' + str(completed) + '\n'
logger.info(log_string)
if template_id is not None:
search_url += '&template=' + template_id
if completed is not False:
search_url += '&completed=true'
response = self.authenticated_request_get(search_url)
result = response.json() if response.status_code == requests.codes.ok else None
number_discovered = str(result['total']) if result is not None else '0'
log_message = 'on audit_discovery: ' + number_discovered + ' discovered using ' + search_url
self.log_http_status(response.status_code, log_message)
return result
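    # Editor's illustration (hypothetical values, not from the original source): with
    # template_id='template_123', the default modified_after and completed=True,
    # discover_audits() issues a GET against a URL of the form
    #   <audit_url>/search?field=audit_id&field=modified_at&order=asc
    #       &modified_after=2000-01-01T00:00:00.000Z&template=template_123&completed=true
    # and returns the parsed JSON response, whose 'total' field is used for logging above.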
def discover_templates(self, modified_after=None, modified_before=None):
"""
Query API for all template IDs if no parameters are passed, otherwise restrict search based on parameters
:param modified_after: Restrict discovery to templates modified after this UTC timestamp
:param modified_before: Restrict discovery to templates modified before this UTC timestamp
:return: JSON object containing IDs of all templates returned by API
"""
search_url = self.template_search_url
if modified_before is not None:
search_url += '&modified_before=' + modified_before
if modified_after is not None:
search_url += '&modified_after=' + modified_after
response = self.authenticated_request_get(search_url)
result = response.json() if response.status_code == requests.codes.ok else None
log_message = 'on template discovery using ' + search_url
self.log_http_status(response.status_code, log_message)
return result
def get_preference_ids(self, template_id=None):
"""
Query API for all preference IDs if no parameters are passed, else restrict to template_id passed
:param template_id: template_id to obtain export preferences for
:return: JSON object containing list of preference objects
"""
preference_search_url = self.api_url + 'preferences/search'
if template_id is not None:
preference_search_url += '?template_id=' + template_id
response = self.authenticated_request_get(preference_search_url)
result = response.json() if response.status_code == requests.codes.ok else None
return result
def get_export_job_id(self, audit_id, preference_id=None, export_format=DEFAULT_EXPORT_FORMAT):
"""
Request export job ID from API and return it
:param audit_id: audit_id to retrieve export_job_id for
:param preference_id: preference to apply to exports
:param export_format: desired format of exported document
:return: export job ID obtained from API
"""
export_url = '{0}/{1}/report'.format(self.audit_url, audit_id)
if export_format == 'docx': # convert old command line format
export_format = 'WORD'
export_data = {'format': export_format.upper()}
if preference_id is not None:
preference_id_pattern = '^template_[a-fA-F0-9]{32}:' + GUID_PATTERN
preference_id_is_valid = re.match(preference_id_pattern, preference_id)
if preference_id_is_valid:
export_data['preference_id'] = preference_id.split(':')[1]
else:
                error_message = 'preference_id {0} does not match expected pattern'.format(preference_id)
                self.log_critical_error(ValueError(error_message), error_message)
response = self.authenticated_request_post(export_url, data=json.dumps(export_data))
result = response.json() if response.status_code == requests.codes.ok else None
log_message = 'on request to ' + export_url
self.log_http_status(response.status_code, log_message)
return result
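    # Editor's note: the validation above implies a preference_id of the form
    # 'template_' + 32 hex characters + ':' + GUID (GUID_PATTERN is assumed to match a
    # GUID), e.g. the made-up value
    #   'template_0123456789abcdef0123456789abcdef:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'
    # Only the part after the ':' is sent to the API as export_data['preference_id'].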
def poll_for_export(self, audit_id, export_job_id):
"""
Poll API for given export job until job is complete or excessive failed attempts occur
:param audit_id: audit_id of the export to poll for
:param export_job_id: export_job_id of the export to poll for
:return: href for export download
"""
job_id_pattern = '^' + GUID_PATTERN
job_id_is_valid = re.match(job_id_pattern, export_job_id)
if job_id_is_valid:
delay_in_seconds = 5
poll_url = '{0}/{1}/report/{2}'.format(self.audit_url, audit_id, export_job_id)
export_attempts = 1
poll_status = self.authenticated_request_get(poll_url)
status = poll_status.json()
logger = logging.getLogger('sp_logger')
if 'status' in status.keys():
if status['status'] == 'IN_PROGRESS':
logger.info(str(status['status']) + ' : ' + audit_id)
time.sleep(delay_in_seconds)
return self.poll_for_export(audit_id, export_job_id)
elif status['status'] == 'SUCCESS':
logger.info(str(status['status']) + ' : ' + audit_id)
return status['url']
else:
if export_attempts < 2:
export_attempts += 1
                        logger.info('attempt #{0} exporting report for: {1}'.format(export_attempts, audit_id))
retry_id = self.get_export_job_id(audit_id)
return self.poll_for_export(audit_id, retry_id['messageId'])
else:
logger.error('export for ' + audit_id + ' failed {0} times - skipping'.format(export_attempts))
else:
logger.critical('Unexpected response from API: {0}'.format(status))
else:
            error_message = 'export_job_id {0} does not match expected pattern'.format(export_job_id)
            self.log_critical_error(ValueError(error_message), error_message)
def download_export(self, export_href):
"""
:param export_href: href for export document to download
:return: String representation of exported document
"""
try:
response = self.authenticated_request_get(export_href)
result = response.content if response.status_code == requests.codes.ok else None
log_message = 'on GET for href: ' + export_href
self.log_http_status(response.status_code, log_message)
return result
except Exception as ex:
self.log_critical_error(ex, 'Exception occurred while attempting download_export({0})'.format(export_href))
def get_export(self, audit_id, preference_id=None, export_format=DEFAULT_EXPORT_FORMAT):
"""
Obtain exported document from API and return string representation of it
:param audit_id: audit_id of export to obtain
:param preference_id: ID of preference to apply to exports
:param export_format: desired format of exported document
:return: String representation of exported document
"""
export_job_id = self.get_export_job_id(audit_id, preference_id, export_format)['messageId']
export_href = self.poll_for_export(audit_id, export_job_id)
export_content = self.download_export(export_href)
return export_content
def get_media(self, audit_id, media_id):
"""
Get media item associated with a specified audit and media ID
:param audit_id: audit ID of document that contains media
:param media_id: media ID of image to fetch
:return: The Content-Type will be the MIME type associated with the media,
and the body of the response is the media itself.
"""
url = '{0}/{1}/media/{2}'.format(self.audit_url, audit_id, media_id)
response = requests.get(url, headers=self.custom_http_headers, stream=True)
return response
def get_web_report(self, audit_id):
"""
Generate Web Report link associated with a specified audit
:param audit_id: Audit ID
:return: Web Report link
"""
url = '{0}/{1}/web_report_link'.format(self.audit_url, audit_id)
response = self.authenticated_request_get(url)
result = self.parse_json(response.content) if response.status_code == requests.codes.ok else None
self.log_http_status(response.status_code, 'on GET web report for ' + audit_id)
if result:
            return
import os
import csv
import math
import dnaio
import parasail
import editdistance
import re
import logging
import datetime
from .processor import FASTQProcessor, FASTQWorker
from ..fastq_pair import ReadPair
from ..fastq_file import IlluminaFASTQ, BarcodeStatistics
logger = logging.getLogger(__name__)
class DemultiplexWriter(dict):
"""A dictionary subclass holding barcode adapters and
the corresponding file-like objects for writing fastq.gz files.
This class is designed to write read pairs into FASTQ files based on the barcode.
In the dictionary:
Each key is a barcode.
        Each value is a file-like object returned by opening a pair of fastq.gz files.
        The actual filenames are specified by the paired_end_filenames() method.
Attributes:
barcode_dict: A dictionary mapping barcode to filename prefix.
prefix_dict: A dictionary mapping filename prefix to file-like object.
prefix_dict can be used to determine the files with certain prefix are opened.
This class supports context manager, for example:
with DemultiplexWriter(barcode_dict) as writer:
...PROCESSING CODE HERE...
writer.write(BARCODE, READ1, READ2)
...
"""
BARCODE_NOT_MATCHED = "NO_MATCH"
@staticmethod
def paired_end_filenames(prefix):
"""Maps a prefix to a 2-tuple of filenames (R1, R2)
This static method defines the output filenames.
Args:
prefix (str): Prefix for the filenames, including the full path.
Returns: A 2-tuple of strings as the filenames for R1 and R2 FASTQ files.
"""
return prefix + ".R1.fastq.gz", prefix + ".R2.fastq.gz"
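    # For example, a prefix of "/tmp/demo/sampleA" (hypothetical path) maps to the pair
    # ("/tmp/demo/sampleA.R1.fastq.gz", "/tmp/demo/sampleA.R2.fastq.gz").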
def __init__(self, barcode_dict):
"""Initializes the writer with a dictionary mapping barcode to filename prefix.
Args:
barcode_dict: A dictionary mapping barcode to filename prefix
Each key is a barcode.
Each value is a prefix for output filename, including the full path.
The output file will contain the reads corresponds to the barcode.
If multiple barcodes are mapping to the same prefix,
the reads with those barcodes will be written into the same output file pair.
"""
self.barcode_dict = barcode_dict
self.prefix_dict = {}
super().__init__()
def open(self):
"""Opens the files for writing
"""
for barcode, prefix in self.barcode_dict.items():
            if not prefix:
                self[barcode] = None
                continue
            if prefix in self.prefix_dict.keys():
self[barcode] = self.prefix_dict[prefix]
else:
r1_out, r2_out = DemultiplexWriter.paired_end_filenames(prefix)
fp = dnaio.open(r1_out, file2=r2_out, mode='w')
self.prefix_dict[prefix] = fp
self[barcode] = fp
return self
def close(self):
"""Closes the files
"""
        for fp in set(self.values()):
            # Barcodes may share a file object or map to None; close each file only once.
            if fp is not None:
                fp.close()
def write(self, barcode, read1, read2):
fp = self.get(barcode)
if not fp:
return
fp.write(read1, read2)
def __enter__(self):
return self.open()
def __exit__(self, exc_type, exc_val, exc_tb):
return self.close()
class DemultiplexWorker(FASTQWorker):
"""Represents a worker process for demultiplexing FASTQ reads
"""
DEFAULT_ERROR_RATE = 0.1
def __init__(self, barcode_dict, error_rate=None, score=1, penalty=10):
"""Initialize a demultiplex worker process.
Args:
barcode_dict: A dictionary mapping barcode to filename prefix.
The barcode_dict is used to initialize DemultiplexWriter.
error_rate: Max error rate allowed for a read to be considered as matching a barcode.
error_rate is used to determine the max distance allowed between the barcode and the read.
score: Score for one base pair match.
penalty: Penalty for one unit distance.
"""
super().__init__()
self.barcode_dict = barcode_dict
self.adapters = list(barcode_dict.keys())
self.min_match_length = round(min([len(adapter) / 2 for adapter in self.adapters]))
# Set the default values
self.error_rate = error_rate if error_rate else self.DEFAULT_ERROR_RATE
self.score = int(score) if str(score).isdigit() else 1
self.penalty = int(penalty) if str(penalty).isdigit() else 10
        if not self.penalty:
            raise ValueError("Mismatch penalty must be a non-zero positive integer.")
logger.debug("Process %s, Penalty: %s, Error Rate: %s, Score: %s" % (
os.getpid(), self.penalty, self.error_rate, self.score
))
self.score_matrix = self.create_score_matrix()
def create_score_matrix(self):
"""Creates a parasail score matrix for alignment
"""
return parasail.matrix_create("ACGTN", self.score, -1 * self.penalty)
def semi_global_distance(self, s1, s2):
score_matrix = self.create_score_matrix()
result = parasail.sg_de_stats(
s1, s2, self.penalty, self.penalty, score_matrix
)
return (self.score * result.matches - result.score) / self.penalty
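    # Editor's note on the distance formula above (a sketch, assuming the score matrix
    # rewards each matching base with +score and the gap open/extend penalties are both
    # `penalty`): the alignment score is roughly
    #     score * matches - penalty * (mismatches + gaps)
    # so (score * matches - alignment_score) / penalty recovers mismatches + gaps,
    # i.e. an edit-distance-like count over the aligned region.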
def start(self, in_queue, out_queue):
"""Starts the demultiplexing to process reads from in_queue.
        The number of reads processed is put into the out_queue for counting purposes.
Args:
in_queue: A queue holding list of reads to be processed.
                Each item in the in_queue is a list of reads so that the frequency of access to the queue is reduced.
out_queue: A queue holding integers for counting purpose.
Returns:
"""
active_time = datetime.timedelta()
batch_count = 0
with DemultiplexWriter(self.barcode_dict) as writer:
while True:
reads = in_queue.get()
# Keep the starting time for each batch processing
timer_started = datetime.datetime.now()
if reads is None:
batch_time = (active_time / batch_count) if batch_count else 0
logger.debug("Process %s, Active time: %s (%s batches, %s/batch)." % (
os.getpid(), active_time, batch_count, batch_time
))
return self.counts
# results = []
for read_pair in reads:
barcode, read1, read2 = self.process_read_pair(read_pair)
writer.write(barcode, read1, read2)
# results.append(result)
self.add_count('total', len(reads))
batch_count += 1
# Add processing time for this batch
active_time += (datetime.datetime.now() - timer_started)
out_queue.put(len(reads))
def process_read_pair(self, read_pair):
"""Process the read pair
Sub-class should implement this method to return a 3-tuple, i.e.
(BARCODE, READ1, READ2)
"""
raise NotImplementedError
class DemultiplexInlineWorker(DemultiplexWorker):
"""Demultiplex FASTQ reads by Inline barcode at the beginning of the reads
self.counts will be a dictionary storing the demultiplex statistics, which includes the following keys:
matched: the number of read pairs matched at least ONE of the adapters
unmatched: the number of read pairs matching NONE of the adapters
total: total number of read pairs processed
Three keys for each adapter, i.e. BARCODE, BARCODE_1 and BARCODE_2
BARCODE stores the number of reads matching the corresponding adapter.
BARCODE_1 stores the number of forward reads (pair 1) matching the corresponding adapter.
        BARCODE_2 stores the number of reverse-complement reads (pair 2) matching the corresponding adapter.
    For the values corresponding to BARCODE keys,
        each READ PAIR will only be counted once even if it matches multiple adapters.
        The longer barcode will be used if the two reads in a pair match different adapters.
For the values corresponding to BARCODE_1 and BARCODE_2 keys,
each READ will be counted once.
The first barcode in self.adapters will be used if the read is matching multiple adapters.
"""
DEFAULT_ERROR_RATE = 0.2
def trim_adapters(self, read1, read2):
"""Checks if the beginning of the reads in a read pair matches any adapter.
If so, trim the reads to remove the matching adapter.
A read and an adapter are matched by using semi-global alignment without penalty at the end of the read.
        They are considered as MATCHED if the number of substitutions and gaps is at most
        floor(len(adapter) * error_rate), i.e. within the maximum distance allowed by the error rate.
Args:
read1: Forward read.
            read2: Reverse complement read.
read1 and read2 are dnaio Sequence objects.
They are passed into this method as references.
The modifications on read1 and read2 will be preserved after return.
            The alignment uses self.score_matrix, which is created once in __init__ by:
                score_matrix = parasail.matrix_create("ACGTN", self.score, -1 * self.penalty)
            Reusing the pre-built matrix avoids regenerating it for every read and speeds up the overall processing.
Returns: A 2-tuple indicating whether any adapters are matching the read pair.
If a read matched an adapter, the matching adapter will be returned in the tuple.
Otherwise, the corresponding element in the tuple will be None.
For example, ("ACTGACT", None) indicates barcode "ACTGACT" is matching read1 (forward read).
See Also:
https://github.com/marcelm/dnaio/blob/master/src/dnaio/_core.pyx
https://github.com/jeffdaily/parasail-python#substitution-matrices
"""
# Trim both read1 and read2 with all adapters before return
reads = [read1, read2]
# Indicates whether R1 or R2 matches the adapter.
matched = [""] * len(reads)
for i in range(len(reads)):
read = reads[i]
for adapter in self.adapters:
result = parasail.sg_de_stats(
adapter, read.sequence[:20], self.penalty, self.penalty, self.score_matrix
)
if result.matches <= self.min_match_length:
continue
distance = (self.score * result.matches - result.score) / self.penalty
max_distance = math.floor(len(adapter) * self.error_rate)
if distance <= max_distance:
matched[i] = adapter
read.sequence = read.sequence[result.end_ref + 1:]
read.qualities = read.qualities[result.end_ref + 1:]
break
# read1 and read2 are preserved implicitly
return matched[0], matched[1]
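    # Editor's illustration (hypothetical adapter length): with an 8-bp adapter and the
    # class default error_rate of 0.2, max_distance = floor(8 * 0.2) = 1, so the first
    # 20 bases of a read are trimmed only when they align to the adapter with at most
    # one mismatch/gap and more than min_match_length matching bases.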
def process_read_pair(self, read_pair):
read1, read2 = read_pair
# Initialize ReadPair to check if read1 and read2 are valid
read1, read2 = ReadPair(read1, read2).reads
# read1 and read2 are references
# The modifications on read1 and read2 will be returned implicitly
adapter1, adapter2 = self.trim_adapters(read1, read2)
if adapter1:
self.add_count("%s_1" % adapter1)
if adapter2:
self.add_count("%s_2" % adapter2)
# The longer adapter has higher priority
adapter = adapter1 if len(adapter1) > len(adapter2) else adapter2
if adapter:
# Count the number of reads matching the longer adapter
self.add_count(adapter)
# Sequence matched a barcode
import pygame
import random
from game.tools.asset_cache import getImage
import game.tools.constants as c
class Level:
"""Create a new level object.
This class should not be called directly. Only call its subclasses.
Attributes:
rubberTilesHorizontal: A list of tuples indicating which columns and rows to place horizontal rubber
traps.
rubberTilesVertical: A list of tuples indicating which columns and rows to place vertical rubber traps.
goldTilesHorizontal: A list of tuples indicating which columns and rows to place horizontal gold sprites.
goldTilesVertical: A list of tuples indicating which columns and rows to place vertical gold sprites.
"""
def __init__(self, rubberTilesHorizontal, rubberTilesVertical, goldTilesHorizontal, goldTilesVertical):
"""Init Level using the lists of tuples rubberTilesHorizontal, rubberTilesVertical, goldTilesHorizontal,
and goldTilesVertical.
Instance variables:
image: A None type object. Subclasses replace this with a Surface object of the image to be drawn for
the current level.
standardImage: A None type object. Subclasses replace this with a Surface object of the image to be
seen in standard play of the current level.
lightImage: A None type object. Subclasses replace this with a Surface object of a lighter variant of
the image to be seen in standard play of the current level.
Designed to be used when an ItemClock object is active, or to give the illusion of the level
flashing.
backgroundColor: A tuple indicating the color of the level's background.
activeRubberTraps: An empty list. Subclasses replace this with a list of tuples indicating which
columns and rows have horizontal rubber traps that begin the game in an active state.
            playerStartPosition: A list of four tuples indicating which columns and rows each player starts on.
blackHolePositions: An empty list. Subclasses replace this with a list of tuples indicating which
                columns and rows start with a black hole sprite.
itemTiles: An empty list. Subclasses replace this with a list of tuples indicating which columns and
rows can have items spawned on them.
levelBorderRects: An empty list. Subclasses replace this with a list of rect objects that form the
boundaries of the level.
isFlashing: A boolean indicating if the level should be in a flashing animation, switching between
its standardImage and lightImage.
frameCount: An integer that increases whenever the flashBoard method is called.
"""
self.image = self.standardImage = self.lightImage = None
self.backgroundColor = c.BLACK
self.rubberTilesHorizontal = rubberTilesHorizontal
self.rubberTilesVertical = rubberTilesVertical
self.goldTilesHorizontal = goldTilesHorizontal
self.goldTilesVertical = goldTilesVertical
self.activeRubberTraps = []
self.playerStartPosition = [(0, 0), (0, 0), (0, 0), (0, 0)]
self.blackHolePositions = []
self.itemTiles = []
self.levelBorderRects = []
self.isFlashing = False
self.frameCount = 0
def initialize(self):
"""Set the relevant variables of the level to their initial values."""
self.isFlashing = False
self.image = self.standardImage
self.frameCount = 0
def flashBoard(self):
"""Switch the level's image between standardImage and flashingImage every 6 frames."""
if self.isFlashing:
self.frameCount += 1
if self.frameCount % 12 < 6:
self.image = self.standardImage
else:
self.image = self.lightImage
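# Editor's illustration (hypothetical tile coordinates, not from the original game):
# a Level subclass is typically driven once per frame, roughly like this:
#   level = BoardOneLevel(rubberTilesHorizontal=[(3, 2)], rubberTilesVertical=[(5, 4)],
#                         goldTilesHorizontal=[(6, 1)], goldTilesVertical=[(2, 6)])
#   level.initialize()        # reset image, isFlashing and frameCount
#   level.isFlashing = True   # e.g. while an ItemClock item is active
#   level.flashBoard()        # alternates standardImage/lightImage every 6 frames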
class BoardOneLevel(Level):
"""Create a new object of the first variant of levels.
Attributes:
rubberTilesHorizontal: A list of tuples indicating which columns and rows to place horizontal rubber
traps.
rubberTilesVertical: A list of tuples indicating which columns and rows to place vertical rubber traps.
goldTilesHorizontal: A list of tuples indicating which columns and rows to place horizontal gold sprites.
goldTilesVertical: A list of tuples indicating which columns and rows to place vertical gold sprites.
"""
def __init__(self, rubberTilesHorizontal, rubberTilesVertical, goldTilesHorizontal, goldTilesVertical):
"""Init BoardOneLevel using the lists of tuples rubberTilesHorizontal, rubberTilesVertical,
goldTilesHorizontal, and goldTilesVertical.
Instance variables:
standardImage: The image to be drawn for the level during standard gameplay.
lightImage: A lighter variant of standardImage, designed to be used when an ItemClock object is
active, or to give the illusion of the level flashing.
image: The current image to be drawn for the level.
Defaults to the standardImage.
backgroundColor: A tuple indicating the color of the level's background.
            playerStartPosition: A list of four tuples indicating which columns and rows each player starts on.
blackHolePositions: A list of four tuples indicating which columns and rows each black hole sprite
starts on.
itemTiles: A list of tuples indicating which columns and rows can have items spawned on them.
This should include every tile that a player can reach, except those tiles that the players start
on.
levelBorderRects: A list of rect objects that form the boundaries of the level.
"""
super().__init__(rubberTilesHorizontal, rubberTilesVertical, goldTilesHorizontal, goldTilesVertical)
self.standardImage = getImage(c.BACKGROUND_FOLDER, "background_1A.png")
self.lightImage = getImage(c.BACKGROUND_FOLDER, "background_1B.png")
self.image = self.standardImage
self.backgroundColor = c.DARK_RED
self.playerStartPosition = [(1, 1), (9, 1), (2, 7), (8, 7)]
self.blackHolePositions = [(5, 4)]
self.itemTiles = [(x, y) for x in range(1, 10) for y in range(0, 8) if (x, y) not in self.playerStartPosition
and (x, y) not in self.blackHolePositions and (x, y) not in [(1, 0), (9, 0), (1, 7),
(9, 7)]]
self.levelBorderRects = [pygame.Rect(0, 0, 80, 84), pygame.Rect(0, 0, 512, 36), pygame.Rect(0, 0, 39, 448),
pygame.Rect(432, 0, 80, 84), pygame.Rect(477, 0, 39, 448),
pygame.Rect(0, 380, 80, 84), pygame.Rect(432, 380, 80, 84),
pygame.Rect(0, 426, 512, 36)]
class BoardTwoLevel(Level):
"""Create a new object of the second variant of levels.
Attributes:
rubberTilesHorizontal: A list of tuples indicating which columns and rows to place horizontal rubber
traps.
rubberTilesVertical: A list of tuples indicating which columns and rows to place vertical rubber traps.
goldTilesHorizontal: A list of tuples indicating which columns and rows to place horizontal gold sprites.
goldTilesVertical: A list of tuples indicating which columns and rows to place vertical gold sprites.
"""
def __init__(self, rubberTilesHorizontal, rubberTilesVertical, goldTilesHorizontal, goldTilesVertical):
"""Init BoardTwoLevel using the lists of tuples rubberTilesHorizontal, rubberTilesVertical,
goldTilesHorizontal, and goldTilesVertical.
Instance variables:
standardImage: The image to be drawn for the level during standard gameplay.
lightImage: A lighter variant of standardImage, designed to be used when an ItemClock object is
active, or to give the illusion of the level flashing.
image: The current image to be drawn for the level.
Defaults to the standardImage.
backgroundColor: A tuple indicating the color of the level's background.
            playerStartPosition: A list of four tuples indicating which columns and rows each player starts on.
blackHolePositions: A list of four tuples indicating which columns and rows each black hole sprite
starts on.
itemTiles: A list of tuples indicating which columns and rows can have items spawned on them.
This should include every tile that a player can reach, except those tiles that the players start
on.
levelBorderRects: A list of rect objects that form the boundaries of the level.
"""
super().__init__(rubberTilesHorizontal, rubberTilesVertical, goldTilesHorizontal, goldTilesVertical)
self.standardImage = getImage(c.BACKGROUND_FOLDER, "background_2A.png")
self.lightImage = getImage(c.BACKGROUND_FOLDER, "background_2B.png")
self.image = self.standardImage
self.backgroundColor = c.DARK_GREEN
self.playerStartPosition = [(4, 0), (6, 0), (1, 5), (9, 5)]
self.blackHolePositions = [(2, 6), (8, 6)]
self.itemTiles = [(x, y) for x in range(1, 10) for y in range(0, 8) if (x, y) not in self.playerStartPosition
and (x, y) not in self.blackHolePositions and (x, y) not in [(1, 0), (9, 0), (1, 7),
(9, 7)]]
self.levelBorderRects = [pygame.Rect(0, 0, 80, 84), pygame.Rect(0, 0, 512, 36), pygame.Rect(0, 0, 39, 448),
pygame.Rect(432, 0, 80, 84), pygame.Rect(477, 0, 39, 448),
pygame.Rect(0, 380, 80, 84), pygame.Rect(432, 380, 80, 84),
pygame.Rect(0, 426, 512, 36)]
class BoardThreeLevel(Level):
"""Create a new object of the third variant of levels.
Attributes:
rubberTilesHorizontal: A list of tuples indicating which columns and rows to place horizontal rubber
traps.
rubberTilesVertical: A list of tuples indicating which columns and rows to place vertical rubber traps.
goldTilesHorizontal: A list of tuples indicating which columns and rows to place horizontal gold sprites.
goldTilesVertical: A list of tuples indicating which columns and rows to place vertical gold sprites.
"""
def __init__(self, rubberTilesHorizontal, rubberTilesVertical, goldTilesHorizontal, goldTilesVertical):
"""Init BoardThreeLevel using the lists of tuples rubberTilesHorizontal, rubberTilesVertical,
goldTilesHorizontal, and goldTilesVertical.
Instance variables:
standardImage: The image to be drawn for the level during standard gameplay.
lightImage: A lighter variant of standardImage, designed to be used when an ItemClock object is
active, or to give the illusion of the level flashing.
image: The current image to be drawn for the level.
Defaults to the standardImage.
| |
import discord
from discord.ext import commands
import asyncio
import aiohttp
import random
from datetime import datetime, timedelta
import math
import json
# #####################################################################################
# math parser used by $calc
class MathParser:
def __init__(self):
self.expression = ""
self.index = 0
self.vars = {}
self.funcs = ['cos', 'sin', 'tan', 'acos', 'asin', 'atan', 'cosh', 'sinh', 'tanh', 'acosh', 'asinh', 'atanh', 'exp', 'ceil', 'abs', 'factorial', 'floor', 'round', 'trunc', 'log', 'log2', 'log10', 'sqrt', 'rad', 'deg']
def evaluate(self, expression = "", vars={}):
self.expression = expression.replace(' ', '').replace('\t', '').replace('\n', '').replace('\r', '')
self.index = 0
self.vars = {
'pi' : 3.141592653589793,
'e' : 2.718281828459045
}
self.vars = {**self.vars, **vars}
for func in self.funcs:
if func in self.vars: raise Exception("Variable name '{}' can't be used".format(func))
value = float(self.parse())
if self.isNotDone(): raise Exception("Unexpected character '{}' found at index {}".format(self.peek(), self.index))
epsilon = 0.0000000001
if int(value) == value: return int(value)
elif int(value + epsilon) != int(value):
return int(value + epsilon)
elif int(value - epsilon) != int(value):
return int(value)
return value
def isNotDone(self):
return self.index < len(self.expression)
def peek(self):
return self.expression[self.index:self.index + 1]
def parse(self):
values = [self.multiply()]
while True:
c = self.peek()
if c in ['+', '-']:
self.index += 1
if c == '-': values.append(- self.multiply())
else: values.append(self.multiply())
else:
break
return sum(values)
def multiply(self):
values = [self.parenthesis()]
while True:
c = self.peek()
if c in ['*', 'x']:
self.index += 1
values.append(self.parenthesis())
elif c in ['/', '%']:
div_index = self.index
self.index += 1
denominator = self.parenthesis()
if denominator == 0:
raise Exception("Division by 0 occured at index {}".format(div_index))
if c == '/': values.append(1.0 / denominator)
else: values.append(1.0 % denominator)
elif c == '^':
self.index += 1
exponent = self.parenthesis()
values[-1] = values[-1] ** exponent
elif c == '!':
self.index += 1
values[-1] = math.factorial(values[-1])
else:
break
value = 1.0
for factor in values: value *= factor
return value
def parenthesis(self):
if self.peek() == '(':
self.index += 1
value = self.parse()
if self.peek() != ')': raise Exception("No closing parenthesis found at character {}".format(self.index))
self.index += 1
return value
else:
return self.negative()
def negative(self):
if self.peek() == '-':
self.index += 1
return -1 * self.parenthesis()
else:
return self.value()
def value(self):
if self.peek() in '0123456789.':
return self.number()
else:
return self.variable_or_function()
def variable_or_function(self):
var = ''
while self.isNotDone():
c = self.peek()
if c.lower() in '_abcdefghijklmnopqrstuvwxyz0123456789':
var += c
self.index += 1
else:
break
value = self.vars.get(var, None)
        if value is None:
if var not in self.funcs: raise Exception("Unrecognized variable '{}'".format(var))
else:
param = self.parenthesis()
if var == 'cos': value = math.cos(param)
elif var == 'sin': value = math.sin(param)
elif var == 'tan': value = math.tan(param)
elif var == 'acos': value = math.acos(param)
elif var == 'asin': value = math.asin(param)
elif var == 'atan': value = math.atan(param)
elif var == 'cosh': value = math.cosh(param)
elif var == 'sinh': value = math.sinh(param)
elif var == 'tanh': value = math.tanh(param)
elif var == 'acosh': value = math.acosh(param)
elif var == 'asinh': value = math.asinh(param)
elif var == 'atanh': value = math.atanh(param)
elif var == 'exp': value = math.exp(param)
elif var == 'ceil': value = math.ceil(param)
elif var == 'floor': value = math.floor(param)
                elif var == 'round': value = round(param)
elif var == 'factorial': value = math.factorial(param)
elif var == 'abs': value = math.fabs(param)
elif var == 'trunc': value = math.trunc(param)
elif var == 'log':
if param <= 0: raise Exception("Can't evaluate the logarithm of '{}'".format(param))
value = math.log(param)
elif var == 'log2':
if param <= 0: raise Exception("Can't evaluate the logarithm of '{}'".format(param))
value = math.log2(param)
elif var == 'log10':
if param <= 0: raise Exception("Can't evaluate the logarithm of '{}'".format(param))
value = math.log10(param)
elif var == 'sqrt': value = math.sqrt(param)
elif var == 'rad': value = math.radians(param)
elif var == 'deg': value = math.degrees(param)
else: raise Exception("Unrecognized function '{}'".format(var))
return float(value)
def number(self):
strValue = ''
decimal_found = False
c = ''
while self.isNotDone():
c = self.peek()
if c == '.':
if decimal_found:
raise Exception("Found an extra period in a number at character {}".format(self.index))
decimal_found = True
strValue += '.'
elif c in '0123456789':
strValue += c
else:
break
self.index += 1
if len(strValue) == 0:
if c == '': raise Exception("Unexpected end found")
else: raise Exception("A number was expected at character {} but instead '{}' was found".format(self.index, char))
return float(strValue)
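# Editor's illustration of MathParser (example values, not taken from the bot itself):
#   MathParser().evaluate("2^10")                                     -> 1024 (as int)
#   MathParser().evaluate("cos(a + b) / c", {"a": 1, "b": 2, "c": 3}) -> cos(3) / 3
# Defining a variable that shadows a built-in function name (e.g. 'cos') raises.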
# #####################################################################################
# Cogs
class General(commands.Cog):
"""General commands."""
def __init__(self, bot):
self.bot = bot
self.color = 0x8fe3e8
def startTasks(self):
self.bot.runTask('reminder', self.remindertask)
async def remindertask(self):
while True:
if self.bot.exit_flag: return
try:
c = self.bot.getJST() + timedelta(seconds=30)
for r in list(self.bot.reminders.keys()):
di = 0
u = self.bot.get_user(int(r))
if u is None: continue
while di < len(self.bot.reminders[r]):
if c > self.bot.reminders[r][di][0]:
try:
await u.send(embed=self.bot.buildEmbed(title="Reminder", description=self.bot.reminders[r][di][1]))
except Exception as e:
await self.bot.sendError('remindertask', "User: {}\nReminder: {}\nError: {}".format(u.name, self.bot.reminders[r][di][1], e))
self.bot.reminders[r].pop(di)
self.bot.savePending = True
else:
di += 1
if len(self.bot.reminders[r]) == 0:
self.bot.reminders.pop(r)
self.bot.savePending = True
except asyncio.CancelledError:
await self.bot.sendError('remindertask', 'cancelled')
return
except Exception as e:
await self.bot.sendError('remindertask', str(e))
await asyncio.sleep(200)
await asyncio.sleep(40)
def isDisabled(): # for decorators
async def predicate(ctx):
return False
return commands.check(predicate)
def isAuthorized(): # for decorators
async def predicate(ctx):
return ctx.bot.isAuthorized(ctx)
return commands.check(predicate)
# get a 4chan thread
async def get4chan(self, board : str, search : str): # be sure to not abuse it, you are not supposed to call the api more than once per second
try:
search = search.lower()
url = 'http://a.4cdn.org/{}/catalog.json'.format(board) # board catalog url
async with aiohttp.ClientSession() as session:
async with session.get(url) as r:
if r.status == 200:
data = await r.json()
threads = []
for p in data:
for t in p["threads"]:
try:
if t["sub"].lower().find(search) != -1 or t["com"].lower().find(search) != -1:
threads.append([t["no"], t["replies"]]) # store the thread ids matching our search word
except:
pass
threads.sort(reverse=True)
return threads
except:
return []
@commands.command(no_pm=True, cooldown_after_parsing=True)
@isAuthorized()
async def roll(self, ctx, dice : str = ""):
"""Rolls a dice in NdN format."""
try:
rolls, limit = map(int, dice.split('d'))
result = ", ".join(str(random.randint(1, limit)) for r in range(rolls))
await ctx.send(embed=self.bot.buildEmbed(title="{}'s dice Roll(s)".format(ctx.message.author.display_name), description=result, color=self.color))
except:
await ctx.send(embed=self.bot.buildEmbed(title="Format has to be in NdN", footer="example: roll 2d6", color=self.color))
@commands.command(no_pm=True, cooldown_after_parsing=True, aliases=['choice'])
@isAuthorized()
@commands.cooldown(2, 10, commands.BucketType.guild)
async def choose(self, ctx, *, choices : str ):
"""Chooses between multiple choices.
Use quotes if one of your choices contains spaces.
Example: $choose I'm Alice ; Bob"""
try:
possible = choices.split(";")
if len(possible) < 2: raise Exception()
await ctx.send(embed=self.bot.buildEmbed(title="{}, I choose".format(ctx.message.author.display_name), description=random.choice(possible), color=self.color))
except:
await ctx.send(embed=self.bot.buildEmbed(title="Give me a list of something to choose from 😔, separated by ';'", color=self.color))
@commands.command(no_pm=True, cooldown_after_parsing=True, aliases=['math'])
@commands.cooldown(2, 10, commands.BucketType.guild)
async def calc(self, ctx, *terms : str):
"""Process a mathematical expression
        You can define variables by separating them with commas.
Some functions are also available.
Example: cos(a + b) / c, a = 1, b=2,c = 3"""
try:
m = " ".join(terms).split(",")
d = {}
for i in range(1, len(m)): # process the variables if any
x = m[i].replace(" ", "").split("=")
if len(x) == 2: d[x[0]] = float(x[1])
else: raise Exception('')
msg = "{} = **{}**".format(m[0], MathParser().evaluate(m[0], d))
if len(d) > 0:
msg += "\nwith:\n"
for k in d:
msg += "{} = {}\n".format(k, d[k])
await ctx.send(embed=self.bot.buildEmbed(title="Calculator", description=msg, color=self.color))
except Exception as e:
await ctx.send(embed=self.bot.buildEmbed(title="Error", description=str(e), color=self.color))
@commands.command(no_pm=True, cooldown_after_parsing=True)
@commands.cooldown(1, 5, commands.BucketType.guild)
async def jst(self, ctx):
"""Post the current time, JST timezone"""
await ctx.send(embed=self.bot.buildEmbed(title="{} {:%Y/%m/%d %H:%M} JST".format(self.bot.getEmote('clock'), self.bot.getJST()), color=self.color))
@commands.command(no_pm=True, cooldown_after_parsing=True, alias=['inrole', 'rolestat'])
@isAuthorized()
async def roleStats(self, ctx, *name : str):
"""Search how many users have a matching role
        use quotes if your match contains spaces
add 'exact' at the end to force an exact match"""
g = ctx.author.guild
i = 0
if len(name) > 0 and name[-1] == "exact":
exact = True
name = name[:-1]
else:
exact = False
name = ' '.join(name)
for member in g.members:
for r in member.roles:
if r.name == name or (exact == False and r.name.lower().find(name.lower()) != -1):
i += 1
if exact != "exact":
await ctx.send(embed=self.bot.buildEmbed(title="Roles containing: {}".format(name), description="{} user(s)".format(i), thumbnail=g.icon_url, footer="on server {}".format(g.name), color=self.color))
else:
await ctx.send(embed=self.bot.buildEmbed(title="Roles matching: {}".format(name), description="{} user(s)".format(i), | |
specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.
:param str name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
:param bool optional: Specify whether the Secret or its key must be defined
"""
if items is not None:
pulumi.set(__self__, "items", items)
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def items(self) -> Optional[Sequence['outputs.DatadogAgentSpecClusterAgentConfigVolumesProjectedSourcesSecretItems']]:
"""
If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.
"""
return pulumi.get(self, "items")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def optional(self) -> Optional[bool]:
"""
Specify whether the Secret or its key must be defined
"""
return pulumi.get(self, "optional")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecClusterAgentConfigVolumesProjectedSourcesSecretItems(dict):
"""
Maps a string key to a path within a volume.
"""
def __init__(__self__, *,
key: str,
path: str,
mode: Optional[int] = None):
"""
Maps a string key to a path within a volume.
:param str key: The key to project.
:param str path: The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.
:param int mode: Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "path", path)
if mode is not None:
pulumi.set(__self__, "mode", mode)
@property
@pulumi.getter
def key(self) -> str:
"""
The key to project.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def path(self) -> str:
"""
The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.
"""
return pulumi.get(self, "path")
@property
@pulumi.getter
def mode(self) -> Optional[int]:
"""
Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
"""
return pulumi.get(self, "mode")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecClusterAgentConfigVolumesProjectedSourcesServiceAccountToken(dict):
"""
information about the serviceAccountToken data to project
"""
def __init__(__self__, *,
path: str,
audience: Optional[str] = None,
expiration_seconds: Optional[int] = None):
"""
information about the serviceAccountToken data to project
:param str path: Path is the path relative to the mount point of the file to project the token into.
:param str audience: Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.
        :param int expiration_seconds: ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours. Defaults to 1 hour and must be at least 10 minutes.
"""
pulumi.set(__self__, "path", path)
if audience is not None:
pulumi.set(__self__, "audience", audience)
if expiration_seconds is not None:
pulumi.set(__self__, "expiration_seconds", expiration_seconds)
@property
@pulumi.getter
def path(self) -> str:
"""
Path is the path relative to the mount point of the file to project the token into.
"""
return pulumi.get(self, "path")
@property
@pulumi.getter
def audience(self) -> Optional[str]:
"""
Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.
"""
return pulumi.get(self, "audience")
@property
@pulumi.getter(name="expirationSeconds")
def expiration_seconds(self) -> Optional[int]:
"""
        ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours. Defaults to 1 hour and must be at least 10 minutes.
"""
return pulumi.get(self, "expiration_seconds")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecClusterAgentConfigVolumesQuobyte(dict):
"""
Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
"""
def __init__(__self__, *,
registry: str,
volume: str,
group: Optional[str] = None,
read_only: Optional[bool] = None,
tenant: Optional[str] = None,
user: Optional[str] = None):
"""
Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
:param str registry: Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes
:param str volume: Volume is a string that references an already created Quobyte volume by name.
        :param str group: Group to map volume access to. Default is no group
        :param bool read_only: ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.
        :param str tenant: Tenant owning the given Quobyte volume in the Backend. Used with dynamically provisioned Quobyte volumes, value is set by the plugin
        :param str user: User to map volume access to. Defaults to serviceaccount user
"""
pulumi.set(__self__, "registry", registry)
pulumi.set(__self__, "volume", volume)
if group is not None:
pulumi.set(__self__, "group", group)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
if tenant is not None:
pulumi.set(__self__, "tenant", tenant)
if user is not None:
pulumi.set(__self__, "user", user)
@property
@pulumi.getter
def registry(self) -> str:
"""
Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes
"""
return pulumi.get(self, "registry")
@property
@pulumi.getter
def volume(self) -> str:
"""
Volume is a string that references an already created Quobyte volume by name.
"""
return pulumi.get(self, "volume")
@property
@pulumi.getter
def group(self) -> Optional[str]:
"""
        Group to map volume access to. Default is no group
"""
return pulumi.get(self, "group")
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[bool]:
"""
ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.
"""
return pulumi.get(self, "read_only")
@property
@pulumi.getter
def tenant(self) -> Optional[str]:
"""
        Tenant owning the given Quobyte volume in the Backend. Used with dynamically provisioned Quobyte volumes, value is set by the plugin
"""
return pulumi.get(self, "tenant")
@property
@pulumi.getter
def user(self) -> Optional[str]:
"""
        User to map volume access to. Defaults to serviceaccount user
"""
return pulumi.get(self, "user")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecClusterAgentConfigVolumesRbd(dict):
"""
    RBD represents a Rados Block Device mount on the host that shares a pod's lifetime
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# BCDI: tools for pre(post)-processing Bragg coherent X-ray diffraction imaging data
# (c) 07/2017-06/2019 : CNRS UMR 7344 IM2NP
# (c) 07/2019-present : DESY PHOTON SCIENCE
# authors:
# <NAME>, <EMAIL>
try:
import hdf5plugin # for P10, should be imported before h5py or PyTables
except ModuleNotFoundError:
pass
import numpy as np
import xrayutilities as xu
import scipy.signal # for medfilt
import matplotlib.pyplot as plt
from scipy.interpolate import RegularGridInterpolator
import sys
import tkinter as tk
from tkinter import filedialog
from numpy.fft import fftn, fftshift
import gc
import bcdi.graph.graph_utils as gu
from bcdi.experiment.detector import Detector
from bcdi.experiment.setup import Setup
import bcdi.facet_recognition.facet_utils as fu
import bcdi.preprocessing.preprocessing_utils as pru
import bcdi.simulation.simulation_utils as simu
import bcdi.utils.utilities as util
helptext = """
Stereographic projection of a measured 3D diffraction pattern, or of one calculated
from a real-space BCDI reconstruction. A shell of width dq in reciprocal space,
located at a radius radius_mean (in q) from the Bragg peak, is projected from the
South and North poles onto the equatorial plane.
The coordinate system follows the CXI convention: Z downstream, Y vertical up and X
outboard. Q values follow the more classical convention: qx downstream, qz vertical
up, qy outboard.
"""
######################
# generic parameters #
######################
scan = 78 # spec scan number
root_folder = "D:/data/Pt THH ex-situ/Data/HS4670/"
sample_name = "S" # "S" #
comment = ""
reflection = np.array([0, 2, 0]) # np.array([0, 0, 2]) # # reflection measured
projection_axis = 1 # the projection will be performed on the equatorial plane
# perpendicular to that axis (0, 1 or 2)
radius_mean = 0.030 # q from Bragg peak
dq = 0.001 # width in q of the shell to be projected
sample_offsets = None # tuple of offsets in degrees of the sample
# for each sample circle (outer first).
# the sample offsets will be subtracted from the motor values. Leave None if no offset.
q_offset = [
0,
0,
0,
] # offset of the projection plane in [qx, qy, qz] (0 = equatorial plane)
# q_offset applies only to measured diffraction pattern
# (not obtained from a reconstruction)
photon_threshold = 0 # threshold applied to the measured diffraction pattern
contour_range = None # range(250, 2600, 250)
# range for the plot contours range(min, max, step), leave it to None for default
max_angle = 100 # maximum angle in degrees of the stereographic projection
# (should be larger than 90)
medianfilter_kernel = 3 # size in each dimension of the 3D kernel for median filtering,
# set it to None to skip the median filtering
plot_planes = True # if True, plot dotted circles corresponding to
# planes_south and planes_north indices
hide_axis = (
False # if True, the default axis frame, ticks and ticks labels will be hidden
)
planes_south = {}  # create dictionary for the projection from the South pole,
# the reference is +reflection
planes_south["0 2 0"] = simu.angle_vectors(
ref_vector=reflection, test_vector=np.array([0, 2, 0])
)
planes_south["1 1 1"] = simu.angle_vectors(
ref_vector=reflection, test_vector=np.array([1, 1, 1])
)
# planes_south['1 0 0'] =
# simu.angle_vectors(ref_vector=reflection, test_vector=np.array([1, 0, 0]))
# planes_south['1 0 0'] =
# simu.angle_vectors(ref_vector=reflection, test_vector=np.array([1, 0, 0]))
# planes_south['1 1 0'] =
# simu.angle_vectors(ref_vector=reflection, test_vector=np.array([1, 1, 0]))
# planes_south['-1 1 0'] =
# simu.angle_vectors(ref_vector=reflection, test_vector=np.array([-1, 1, 0]))
# planes_south['1 -1 1'] =
# simu.angle_vectors(ref_vector=reflection, test_vector=np.array([1, -1, 1]))
# planes_south['-1 -1 1'] =
# simu.angle_vectors(ref_vector=reflection, test_vector=np.array([-1, -1, 1]))
planes_south["1 2 0"] = simu.angle_vectors(
ref_vector=reflection, test_vector=np.array([1, 2, 0])
)
planes_south["2 1 0"] = simu.angle_vectors(
ref_vector=reflection, test_vector=np.array([2, 1, 0])
)
planes_south["2 0 1"] = simu.angle_vectors(
ref_vector=reflection, test_vector=np.array([2, 0, 1])
)
planes_north = {}  # create dictionary for the projection from the North pole,
# the reference is -reflection
planes_north["0 -2 0"] = simu.angle_vectors(
ref_vector=-reflection, test_vector=np.array([0, -2, 0])
)
planes_north["-1 -1 -1"] = simu.angle_vectors(
ref_vector=-reflection, test_vector=np.array([-1, -1, -1])
)
# planes_north['-1 0 0'] =
# simu.angle_vectors(ref_vector=-reflection, test_vector=np.array([-1, 0, 0]))
# planes_north['-1 -1 0'] =
# simu.angle_vectors(ref_vector=-reflection, test_vector=np.array([-1, -1, 0]))
# planes_north['-1 1 0'] =
# simu.angle_vectors(ref_vector=-reflection, test_vector=np.array([-1, 1, 0]))
# planes_north['-1 -1 1'] =
# simu.angle_vectors(ref_vector=-reflection, test_vector=np.array([-1, -1, 1]))
# planes_north['-1 1 1'] =
# simu.angle_vectors(ref_vector=-reflection, test_vector=np.array([-1, 1, 1]))
planes_north["1 -2 0"] = simu.angle_vectors(
ref_vector=-reflection, test_vector=np.array([1, -2, 0])
)
planes_north["2 -1 0"] = simu.angle_vectors(
ref_vector=-reflection, test_vector=np.array([2, -1, 0])
)
planes_north["2 0 1"] = simu.angle_vectors(
ref_vector=-reflection, test_vector=np.array([2, 0, 1])
)
debug = False # True to show more plots, False otherwise
########################################################
# parameters for plotting the stereographic projection #
# starting from the phased real space object only #
########################################################
reconstructed_data = (
False # set it to True if the data is a BCDI reconstruction (real space)
)
# the reconstruction should be in the crystal orthogonal frame
threshold_amp = (
0.3 # threshold for support determination from amplitude, if reconstructed_data=1
)
use_phase = (
False # set to False to use only a support, True to use the complex amplitude
)
binary_support = (
False # if True, the modulus of the reconstruction will be set to a binary support
)
phase_factor = -1 # 1, -1, -2*np.pi/d depending on what is in the field phase
# (phase, -phase, displacement...)
voxel_size = [
3.0,
3.0,
3.0,
] # in nm, voxel size of the CDI reconstruction in each directions. Put [] if unknown
pad_size = [
2,
2,
2,
] # list of three int >= 1, will pad to get this number times the initial array size
# voxel size does not change, hence it corresponds to upsampling the diffraction pattern
upsampling_ratio = 2 # int >=1, upsample the real space object by this factor
# (voxel size divided by upsampling_ratio)
# it corresponds to increasing the size of the detector while keeping
# detector pixel size constant
#################################################################################
# define beamline related parameters, not used for the phased real space object #
#################################################################################
beamline = (
"ID01" # name of the beamline, used for data loading and normalization by monitor
)
# supported beamlines: 'ID01', 'SIXS_2018', 'SIXS_2019', 'CRISTAL', 'P10'
custom_scan = False # True for a stack of images acquired without scan,
# e.g. with ct in a macro (no info in spec file)
custom_images = (
None # np.arange(11665, 11764, 1) # list of image numbers for the custom_scan
)
custom_monitor = None # np.ones(len(custom_images))
# monitor values for normalization for the custom_scan
custom_motors = None
# {"eta": np.linspace(16.989, 18.969596, num=100, endpoint=False),
# "phi": 0, "nu": -0.75, "delta": 35.978}
# ID01: eta, phi, nu, delta
# CRISTAL: mgomega, gamma, delta
# P10: om, phi, chi, mu, gamma, delta
# SIXS: beta, mu, gamma, delta
rocking_angle = "outofplane" # "outofplane" or "inplane" or "energy"
follow_bragg = False # only for energy scans, set to True if the detector was
# also scanned to follow the Bragg peak
specfile_name = "psic_nano_20141204"
# .spec for ID01, .fio for P10, alias_dict.txt for SIXS_2018,
# not used for CRISTAL and SIXS_2019
# template for ID01: name of the spec file without '.spec'
# template for SIXS_2018: full path of the alias dictionary,
# typically root_folder + 'alias_dict_2019.txt'
# template for SIXS_2019: ''
# template for P10: sample_name + '_%05d'
# template for CRISTAL: ''
filtered_data = True # set to True if the data is already a 3D array, False otherwise
is_orthogonal = False  # True if the filtered_data is already orthogonalized,
# q values need to be provided
normalize_flux = "skip" # 'monitor' to normalize the intensity by the default
# monitor values, 'skip' to do nothing
#######################################################
# define detector related parameters and region of #
# interest, not used for the phased real space object #
#######################################################
detector = "Maxipix" # "Eiger2M" or "Maxipix" or "Eiger4M"
# x_bragg = 451 # horizontal pixel number of the Bragg peak
# y_bragg = 1450 # vertical pixel number of the Bragg peak
# roi_detector = [1202, 1610, x_bragg - 256, x_bragg + 256] # HC3207 x_bragg = 430
roi_detector = [] # [y_bragg - 290, y_bragg + 350, x_bragg - 350, x_bragg + 350] # Ar
# roi_detector = [552, 1064, x_bragg - 240, x_bragg + 240] # P10 2018
# leave it as [] to use the full detector.
# Use with center_fft='do_nothing' if you want this exact size.
hotpixels_file = "" # root_folder + 'hotpixels.npz' #
flatfield_file = root_folder + "flatfield_maxipix_8kev.npz" #
template_imagefile = "Pt4_%04d.edf" # .gz'
# template for ID01: 'data_mpx4_%05d.edf.gz' or 'align_eiger2M_%05d.edf.gz'
# template for SIXS_2018: 'align.spec_ascan_mu_%05d.nxs'
# template for SIXS_2019: 'spare_ascan_mu_%05d.nxs'
# template for Cristal: 'S%d.nxs'
# template for P10: '_data_%06d.h5'
binning = [
1,
1,
1,
] # binning to apply to the measured diffraction pattern in each dimension
###################################################################
# define parameters for xrayutilities, used for orthogonalization #
# not used for the phased real space object #
###################################################################
# xrayutilities uses the xyz crystal frame: for incident angle = 0,
# x is downstream, y outboard, and z vertical up
sdd = (
1.26 # 0.865 # sample to detector distance in m, not important if you use raw data
)
energy = 9000  # x-ray energy in eV, not important if you use raw data
if self.showDrop: desc.height = self.height - img.height - dropButton.height - 30
elif self.showBuy: desc.height = self.height - img.height - buyButton.height - 30
elif self.showSell: desc.height = self.height - img.height - sellButton.height - 30
else: desc.height = self.height - img.height - 30
desc.text = self.item.description
desc.x = 10
desc.y = img.y + img.height + 5
self.add(desc)
self.show = False
self.renderOnTop = True
self.inMotion = False
def dropButtonCb(self, event, button):
self.item.drop()
print "Clicked drop button on item: %s" % self.item.name
# for iic in self.ancestor.inventory._children:
# iibc = iic._children[0]
# if iibc.item == self.item:
# if iic.item.count == 0:
# self.ancestor.inventory.remove(iic)
# self.ancestor.remove(iic)
# self.ancestor.resetInventory()
# self.show = False
#
# break
def buyButtonCb(self, event, button):
print "Buying %s for %s" % (self.item.name, self.item.cost)
def sellButtonCb(self, event, button):
print "Selling %s for %s" % (self.item.name, self.item.cost)
def mouseEnter(self, mousePos):
DialogBubble.mouseEnter(self, mousePos)
self.inMotion = True
def mouseLeave(self, mousePos):
DialogBubble.mouseLeave(self, mousePos)
self.inMotion = False
self.show = False
def mouseMotion(self, mousePos):
DialogBubble.mouseMotion(self, mousePos)
self.inMotion = True
def forceHide(self):
self.show = False
self.inMotion = False
def hideUnlessActive(self):
if not self.inMotion: self.show = False
class StatsDisplay(BorderedControl):
def __init__(self, character, **kwargs):
BorderedControl.__init__(self, **kwargs)
self.backgroundColor = 'backgroundColor' in kwargs and kwargs['backgroundColor'] or (255,255,255)
if "editable" in kwargs: self.editable = kwargs["editable"]
else: self.editable = False
self.character = character
self.font = pygame.font.Font(None, 22)
self._initComponents()
print "width/height: %d/%d" % (self.width, self.height)
def __repr__(self): return "Stats Display"
def _initComponents(self):
self.children = []
# Stats Labels
self.statLabels = {}
dy = 5
max_width = 200
for stat in self.character.stats.keys():
statLabel = Label(text="%s:" % stat, parent=self, width=80, fontColor=(0,0,0))
statLabel.x = 10
statLabel.y = dy
max_width = max(statLabel.width, max_width)
self.add(statLabel)
dy += statLabel.height
for substat in self.character.stats[stat].keys():
substatLabel = Label(text="%s" % substat, parent=self, width=120, font=self.font, fontColor=(0x99, 0x99, 0x99))
substatLabel.x = statLabel.x + 10
substatLabel.y = dy
substatLabel.clicked = self.statClicked
substatValue = Label(text="%s" % self.character.stats[stat][substat], parent=self, width=50, xalignment="right", font=self.font, fontColor=(0x99,0x99,0x99))
substatValue.x = substatLabel.x + substatLabel.width
substatValue.y = substatLabel.y
max_width = max(max_width, substatLabel.width+substatValue.width+50)
self.add(substatLabel)
self.add(substatValue)
self.statLabels[substatLabel] = substatValue
dy += substatValue.height
dy += 10
self.width = max_width
if dy > self.height: self.height = dy
def statClicked(self, event, control):
if self.editable:
id = InputDialog("Enter new value for %s:" % control.text)
id.run()
if id.result:
try:
self.character.stats.setSubstatValue(control.text, int(id.textInput.text))
self.statLabels[control].text = id.textInput.text
except Exception, e: print 'Could not set %s to "%s" (%s)' % (control.text, id.textInput.text, e)
class CharacterScreen(Application):
"""Shows a character's attributes, vitals, statistics, inventory, etc.
and optionally allows editing of those values.
"""
def __init__(self, character, **kwargs):
Application.__init__(self, **kwargs)
self.backgroundColor = kwargs['backgroundColor'] if 'backgroundColor' in kwargs else (0,0,0,0)
self.editable = kwargs['editable'] if "editable" in kwargs else False
self.character = character
self.initControls()
def initControls(self):
"Create and add all controls to the app."
fnt = pygame.font.Font(configuration.monofont, 15)
fnt.set_bold(True)
self.border = BorderedControl(parent=self, pos=(20,30), width=self.width-40, height=self.height-40)
self.border.borderWidth = 2
self.closeButton = Button(text="X", callback=self.exit, borderWidth=2, parent=self, font=pygame.font.Font(None, 15))
self.closeButton.width = self.closeButton.height
self.closeButton.x = self.width-self.closeButton.width-3
self.closeButton.y = self.y+2
self.add(self.closeButton)
# Sprite picture
self.spriteImage = Image(parent=self.border, surface=self.character._spriteset[2][0], pos=(10,10))
if self.editable: self.spriteImage.clicked = self.spritesetChangeCb
self.border.add(self.spriteImage)
# Name label
self.nameLabel = Label(text=self.character.name, parent=self.border, font=fnt)
self.nameLabel.x = self.spriteImage.right + 10
self.nameLabel.y = self.spriteImage.y
self.nameLabel.width = 200
if self.editable: self.nameLabel.clicked = self.nameChangeCb
self.border.add(self.nameLabel)
# Job Label
if not self.character.job: jobtxt = "Jobless"
else: jobtxt = repr(self.character.job)
f = pygame.font.Font(configuration.regfont, 15)
f.set_italic(True)
f.set_bold(True)
self.jobLabel = Label(text=jobtxt, parent=self.border, font=f)
self.jobLabel.x = self.nameLabel.right + 10
self.jobLabel.y = self.nameLabel.y
self.border.add(self.jobLabel)
# Level label
self.levelLabel = Label(text="Level %d" % self.character.level, parent=self.border, font=fnt)
self.levelLabel.x = self.nameLabel.x
self.levelLabel.y = self.nameLabel.bottom
if self.editable: self.levelLabel.clicked = self.levelChangeCb
self.border.add(self.levelLabel)
# HP Label
self.hpLabel = HpCurMaxLabel(self.character, parent=self.border, font=fnt)
self.hpLabel.x = self.spriteImage.right + 10
self.hpLabel.y = self.levelLabel.y + self.levelLabel.height
self.border.add(self.hpLabel)
# AP Label
self.apLabel = ApCurMaxLabel(self.character, parent=self.border, font=fnt)
self.apLabel.x = self.hpLabel.x
self.apLabel.y = self.hpLabel.y + self.hpLabel.height
self.border.add(self.apLabel)
# Inventory display
self.inventory = ListBox(parent=self.border, width=300, height=300, borderWidth=2, backgroundColor=(255,255,255))
self.inventory.x = self.border.width - self.inventory.width - 10
self.inventory.y = self.border.height - self.inventory.height - 10
#self.inventory.orderItems()
for item in self.character.inventory.stowed:
self.inventory.add(ItemListControl(item, parent=self.inventory))
self.border.add(self.inventory)
# Player stats
self.stats = StatsDisplay(self.character, parent=self.border, editable=self.editable, borderWidth=2)
self.stats.x = 10
self.stats.y = self.inventory.y
self.stats.height = self.inventory.height
self.border.add(self.stats)
if self.editable: self._initEditableComponents()
self.add(self.border)
def _initEditableComponents(self):
# position
self.positionLabel = Label(text="Position: (%d,%d)" % (self.character.px, self.character.py), parent=self.border, font=pygame.font.Font(configuration.regfont, 12))
self.positionLabel.x = self.spriteImage.x
self.positionLabel.y = self.spriteImage.bottom + 5
self.positionLabel.clicked = self.changePositionCb
self.border.add(self.positionLabel)
# 'accept' button
self.okButton = Button(text="OK", parent=self, callback=self.exit, borderWidth=2, font=pygame.font.Font(None, 15))
self.okButton.height = self.closeButton.height
self.okButton.x = self.closeButton.x - self.okButton.width - 5
self.okButton.y = self.closeButton.y
self.add(self.okButton)
# dialog set choice
self.dialogsetLabel = Label(text="Dialog ID: %s" % self.character.dialogId, parent=self.border, font=pygame.font.Font(configuration.regfont, 12))
self.dialogsetLabel.x = self.positionLabel.x
self.dialogsetLabel.y = self.positionLabel.bottom + 5
self.dialogsetLabel.clicked = self.dialogsetChangeCb
self.border.add(self.dialogsetLabel)
# add inventory item
self.addItemButton = Button(text="Add Item", parent=self.border, callback=self.listItemsCb, pos=(self.inventory.x, self.inventory.bottom))
self.border.add(self.addItemButton)
def keypress(self, event):
if event.key == K_ESCAPE: self.exit()
if event.key == K_TAB: self.exit()
def listItemsCb(self, event, control):
controls = []
for item in self.character.scene.itemCollector.items.keys():
controls.append(Button(text=item, callback=self.addItemCb))
ControlDialog("Choose item:", controls).run()
def addItemCb(self, event, control):
item = self.character.scene.itemCollector.createItem(self.character, control.text)
self.character.inventory.stowItem(item)
print "Added %s to %s's inventory" % (item.name, self.character.name)
def resetInventory(self):
self.inventory.sort()
for iic in self.inventory.children: iic.itemBubble = None
def dialogsetChangeCb(self, event, control):
controls = []
for dialogId in self.character.scene.dialogCollector.dialogs.keys():
controls.append(Button(text=dialogId, callback=self.dialogsetChoiceCb))
ControlDialog("Choose dialog:", controls).run()
def dialogsetChoiceCb(self, event, control):
self.character.dialogId = control.text
self.dialogsetLabel.text = "Dialog ID: %s" % self.character.dialogId
control.ancestor.exit()
def levelChangeCb(self, event, control):
id = InputDialog("Enter new value for level:")
id.run()
if id.result:
try:
val = int(id.textInput.text.split('.')[0])
self.character.level = val
self.levelLabel.text = "Level %d" % self.character.level
self.stats._initComponents()
except Exception, e:
ErrorDialog("Failed to set level. (%s)" % e).run()
def statChangeCb(self, event, stat):
id = InputDialog("Enter new value for \"%s\":" % stat)
id.run()
if id.result:
try:
val = int(id.textInput.text)
self.character.stats[stat] = val
self.statLabels[stat].text = "%s: %d" % (stat, self.character.stats[stat])
except Exception, e:
ErrorDialog("Failed to set new stat. (%s)" % e).run()
def changePositionCb(self, event, button):
id = InputDialog("Enter new position in pixels (max: %d,%d)" % (self.character.scene.widthInPixels, self.character.scene.heightInPixels))
id.run()
if id.result:
try:
new_pos = id.textInput.text.replace('(', '')
new_pos = new_pos.replace(')', '')
x = int(new_pos.split(',')[0].strip())
y = int(new_pos.split(',')[1].strip())
self.character.px = x
self.character.py = y
self.positionLabel.text = "Position: (%d,%d)" % (self.character.px, self.character.py)
print "New position: (%d,%d)" % (self.character.px, self.character.py)
except Exception, e:
ErrorDialog("Could not read new position value. (%s)" % e).run()
def spriteChoiceCb(self, event, button):
try:
#ssfn = os.path.join(configuration.spritesetdir, button.text+".zip")
#print "Opening spriteset at %s" % ssfn
ss = Spriteset(button.text)
self.character.changeSpriteset(ss)
self.spriteImage.surface = self.character._spriteset[Spriteset.SOUTH][0]
self.nameLabel.x = self.spriteImage.right + 10
self.jobLabel.x = self.nameLabel.right + 10
self.apLabel.x = self.nameLabel.x
self.hpLabel.x = self.nameLabel.x
self.levelLabel.x = self.nameLabel.x
button.ancestor.done = True
except Exception, ex:
ErrorDialog(text="Failed to read spriteset at that location! (%s)" % ex).run()
def spritesetChangeCb(self, event, button):
controls = []
z = ZipFile(configuration.spritesets)
for spriteset in dataset.splitSets(z.namelist()):
controls.append(Button(text=spriteset, callback=self.spriteChoiceCb))
ControlDialog("Choose spriteset:", controls).run()
def nameChangeCb(self, event, button):
id = InputDialog(text="Enter new name:")
id.run()
self.character.name = id.textInput.text
self.nameLabel.text = self.character.name
class ItemListControl(BorderedControl):
"""A custom control for displaying relevant information in the scrolling list box that
is a character's inventory.
"""
def __init__(self, item, **kwargs):
BorderedControl.__init__(self, **kwargs)
self.borderWidth = 0
self.backgroundColor = (0,0,0,0)
if 'showPrice' in kwargs: self.showPrice = kwargs['showPrice']
else: self.showPrice = False
if 'showEquip' in kwargs: self.showEquip = kwargs['showEquip']
else: self.showEquip = True
if 'showDrop' in kwargs: self.showDrop = kwargs['showDrop']
else: self.showDrop = True
if 'showBuy' in kwargs: self.showBuy = kwargs['showBuy']
else: self.showBuy = False
if 'showSell' in kwargs: self.showSell = kwargs['showSell']
else: self.showSell = False
self.item = item
self._initComponents()
def _initComponents(self):
"Component layout is: Name ... [ @Price ] xCount"
self.children = []
if self.showPrice and self.item.cost > self.item.owner.money: self.cannotBuy = True
else: self.cannotBuy = False
if self.cannotBuy: self.nameLabel = Label(text=self.item.name, parent=self, fontColor=(200,0,0))
else: self.nameLabel = Label(text=self.item.name, parent=self)
self.add(self.nameLabel)
if self.showPrice:
if self.cannotBuy: self.countLabel = Label(text="@%d x%d" % (self.item.cost, self.item.count), parent=self, fontColor=(200,0,0))
else: self.countLabel = Label(text="@%d x%d" % (self.item.cost, self.item.count), parent=self)
else: self.countLabel = Label(text="x%d" % self.item.count, parent=self)
#! /usr/bin/env python3
"""
pratt_tdop_parser.py: Parse shell-like and C-like arithmetic.
"""
import sys
import lexer
from lexer import Token
from tree import Node, CompositeNode
#
# Default parsing functions give errors
#
class ParseError(RuntimeError):
pass
def NullError(p, token, rbp):
raise ParseError("%s can't be used in prefix position" % token)
def LeftError(p, token, rbp, left):
# Hm is this not called because of binding power?
raise ParseError("%s can't be used in infix position" % token)
#
# Parser definition
#
# min and max binding powers
MIN_BP = 0
MAX_BP = 10000
class LeftInfo(object):
def __init__(self, led=None, lbp=MIN_BP, rbp=MIN_BP, nbp=MIN_BP):
self.led = led or LeftError
self.lbp = lbp
self.rbp = rbp
self.nbp = nbp
class NullInfo(object):
def __init__(self, nud=None, lbp=MIN_BP, rbp=MIN_BP, nbp=MIN_BP):
self.nud = nud or NullError
self.lbp = lbp
self.rbp = rbp
self.nbp = nbp
class Parser(object):
"""Recursive TDOP parser."""
def __init__(self):
self.lexer = None # iterable
self.token = None # current token
self.null_lookup = {}
self.left_lookup = {}
"""Specification for a TDOP parser."""
def LookupNull(self, token):
"""Get the parsing function and precedence for a null position token."""
try:
null_info = self.null_lookup[token]
except KeyError:
raise ParseError('Unexpected token %r' % token)
return null_info
def LookupLeft(self, token):
"""Get the parsing function and precedence for a left position token."""
try:
left_info = self.left_lookup[token]
except KeyError:
raise ParseError('Unexpected token %r' % token)
return left_info
def _RegisterNud(self, lbp, rbp, nbp, nud, tokens):
if type(tokens) is str:
self.null_lookup[tokens] = NullInfo(
nud=nud, lbp=lbp, rbp=rbp, nbp=nbp)
if tokens not in self.left_lookup:
self.left_lookup[tokens] = LeftInfo(LeftError) # error
else:
for token in tokens:
self.null_lookup[token] = NullInfo(
nud=nud, lbp=lbp, rbp=rbp, nbp=nbp)
if token not in self.left_lookup:
self.left_lookup[token] = LeftInfo(LeftError) # error
def _RegisterLed(self, lbp, rbp, nbp, led, tokens):
if type(tokens) is str:
if tokens not in self.null_lookup:
self.null_lookup[tokens] = NullInfo(NullError) # error
self.left_lookup[tokens] = LeftInfo(
led=led, lbp=lbp, rbp=rbp, nbp=nbp)
else:
for token in tokens:
if token not in self.null_lookup:
self.null_lookup[token] = NullInfo(NullError) # error
self.left_lookup[token] = LeftInfo(
led=led, lbp=lbp, rbp=rbp, nbp=nbp)
def nilfix(self, bp, nud, tokens):
self._RegisterNud(MIN_BP, MIN_BP, MAX_BP, nud, tokens)
def prefix(self, bp, nud, tokens):
self._RegisterNud(MIN_BP, bp, MAX_BP, nud, tokens)
def suffix(self, bp, led, tokens):
self._RegisterLed(bp, MIN_BP, MAX_BP, led, tokens)
def infixL(self, bp, led, tokens):
self._RegisterLed(bp, bp, bp + 1, led, tokens)
def infixR(self, bp, led, tokens):
self._RegisterLed(bp, bp - 1, bp + 1, led, tokens)
def infixN(self, bp, led, tokens):
self._RegisterLed(bp, bp, bp, led, tokens)
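# A short note on how the helpers above encode associativity via (lbp, rbp, nbp).
# This is only an explanatory summary of the registrations; the concrete numbers
# come from the precedences that cexp_parser assigns further down:
#   infixL: rbp == bp and nbp == bp + 1, so "1-2-3" groups as "(1-2)-3".
#   infixR: rbp == bp - 1, so "2**3**2" groups as "2**(3**2)".
#   infixN: nbp == bp, so a second operator of the same precedence is left
#           unconsumed and the expression is rejected (infixN is not used below).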
def AtToken(self, token_type):
"""Test if we are looking at a token."""
return self.token.kind == token_type
def Next(self):
"""Move to the next token."""
try:
t = self.lexer.__next__()
if t.kind in ['OPER', 'SYNT']:
t.kind = t.lexem
except StopIteration:
t = Token('eof', 'eof')
self.token = t
def Eat(self, val):
"""Assert the value of the current token, then move to the next token."""
if val and not self.AtToken(val):
raise ParseError('expected %s, got %s' % (val, self.token))
self.Next()
# Examples:
# If we see 1*2+ , rbp = 250 and lbp = 230, so stop.
# If we see 1+2+ , rbp = 230 and lbp = 230, so stop.
# If we see 1**2**, rbp = 269 and lbp = 270, so keep going.
def ParseUntil(self, rbp):
""" Parse to the right, eating tokens until we encounter a token with binding power LESS THAN OR EQUAL TO rbp. """
if self.AtToken('eof'):
raise ParseError('Unexpected end of input')
if rbp < MIN_BP:
raise ParseError(
'rbp=%r must be greater than or equal to MIN_BP=%r.' %
(rbp, MIN_BP))
t = self.token
self.Next()
null_info = self.LookupNull(t.kind)
node = null_info.nud(self, t, null_info.rbp)
nbp = null_info.nbp # next bp
lbp = self.LookupLeft(self.token.kind).lbp
while rbp < lbp and lbp < nbp:
t = self.token
self.Next()
left_info = self.LookupLeft(t.kind)
node = left_info.led(self, t, left_info.rbp, node)
nbp = left_info.nbp # next bp
lbp = self.LookupLeft(self.token.kind).lbp
return node
def parse(self, s):
self.lexer = lexer.tokenize(s)
self.Next()
r = self.ParseUntil(0)
if not self.AtToken('eof'):
raise ParseError('There are unparsed tokens: %r' % self.token)
return r
#
# Null Denotations -- tokens that take nothing on the left
#
def NullLiteral(p, token, rbp):
""" Name or number """
return Node(token)
def NullParen(p, token, rbp):
""" Arithmetic grouping """
r = p.ParseUntil(rbp)
p.Eat(')')
r.parenthesis = True
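# This flag is what lets LeftComma (below) treat "(a, b), c" as two children
# instead of flattening everything into a single "a, b, c" node.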
return r
def NullPrefixOp(p, token, rbp):
"""Prefix operator
Low precedence: return, raise, etc.
return x+y is return (x+y), not (return x) + y
High precedence: logical negation, bitwise complement, etc.
!x && y is (!x) && y, not !(x && y)
"""
r = p.ParseUntil(rbp)
return CompositeNode(token.kind, [r])
def NullIncDec(p, token, rbp):
""" ++x or ++x[1] """
right = p.ParseUntil(rbp)
kind = right.token.kind if isinstance(right.token, Token) else right.token
if kind not in ('ID', 'get'):
    raise ParseError("Can't assign to %r (%s)" % (right, right.token))
return CompositeNode(token.kind, [right])
#
# Left Denotations -- tokens that take an expression on the left
#
def LeftIncDec(p, token, rbp, left):
""" i++ and i-- """
# if left.token.kind not in ('ID', 'get'):
# raise tdop.ParseError("Can't assign to %r (%s)" % (left, left.token))
token.kind = 'post' + token.kind
return CompositeNode(token.kind, [left])
def LeftFactorial(p, token, rbp, left):
""" 2! """
token.kind = 'post' + token.kind
return CompositeNode(token.kind, [left])
def LeftIndex(p, token, unused_rbp, left):
""" index f[x+1] or f[x][y] """
if left.token.kind not in ('ID', 'get'):
raise ParseError("%s can't be indexed" % left)
index = p.ParseUntil(0)
p.Eat("]")
token.kind = 'get'
return CompositeNode(token.kind, [left, index])
def LeftTernaryOp(p, token, rbp, left):
""" e.g. a > 1 ? x : y """
# 0 binding power since any operators allowed until ':'. See:
#
# http://en.cppreference.com/w/c/language/operator_precedence#cite_note-2
#
# "The expression in the middle of the conditional operator (between ? and
# :) is parsed as if parenthesized: its precedence relative to ?: is
# ignored."
true_expr = p.ParseUntil(0)
p.Eat(':')
false_expr = p.ParseUntil(rbp)
children = [left, true_expr, false_expr]
return CompositeNode(token.kind, children)
def LeftBinaryOp(p, token, rbp, left):
""" Normal binary operator like 1+2 or 2*3, etc. """
return CompositeNode(token.kind, [left, p.ParseUntil(rbp)])
def LeftAssignOp(p, token, rbp, left):
""" Binary assignment operator like x += 1, or a[i] += 1 """
if left.token not in (
'ID', 'get') and left.token.kind not in ('ID', 'get'):
raise ParseError("Can't assign to %r (%s)" % (left, left.token))
return CompositeNode(token.kind, [left, p.ParseUntil(rbp)])
def LeftComma(p, token, rbp, left):
""" foo, bar, baz - Could be sequencing operator, or tuple without parens """
r = p.ParseUntil(rbp)
if not left.parenthesis and left.token == ',': # Keep adding more children
left.children.append(r)
return left
children = [left, r]
return CompositeNode(token.kind, children)
# For overloading of , inside function calls
COMMA_PREC = 10
def LeftFuncCall(p, token, unused_rbp, left):
""" Function call f(a, b). """
children = [left]
# f(x) or f[i](x)
# if left.token.kind not in ('ID', 'get'):
# raise tdop.ParseError("%s can't be called" % left)
while not p.AtToken(')'):
# We don't want to grab the comma, e.g. it is NOT a sequence operator.
children.append(p.ParseUntil(COMMA_PREC))
if p.AtToken(','):
p.Next()
p.Eat(")")
token.kind = 'call'
return CompositeNode(token.kind, children)
def cexp_parser():
parser = Parser()
"""
Compare the code below with this table of C operator precedence:
http://en.cppreference.com/w/c/language/operator_precedence
"""
parser.suffix(310, LeftIncDec, ['++', '--'])
parser.infixL(310, LeftFuncCall, '(')
parser.infixL(310, LeftIndex, '[')
parser.infixL(310, LeftBinaryOp, '.')
parser.infixL(310, LeftBinaryOp, '->')
parser.suffix(300, LeftFactorial, '!')
# 290 -- binds to everything except function call, indexing, postfix ops
parser.prefix(290, NullIncDec, ['++', '--'])
parser.prefix(290, NullPrefixOp, ['+', '!', '~', '-'])
# Right associative: 2 ** 3 ** 2 == 2 ** (3 ** 2)
parser.infixR(270, LeftBinaryOp, '**')
parser.infixL(250, LeftBinaryOp, ['*', '/', '%'])
parser.infixL(230, LeftBinaryOp, ['+', '-'])
parser.infixL(210, LeftBinaryOp, ['<<', '>>'])
parser.infixL(190, LeftBinaryOp, ['<', '>', '<=', '>='])
parser.infixL(170, LeftBinaryOp, ['!=', '=='])
parser.infixL(150, LeftBinaryOp, '&')
parser.infixL(130, LeftBinaryOp, '^')
parser.infixL(110, LeftBinaryOp, '|')
parser.infixL(90, LeftBinaryOp, '&&')
parser.infixL(70, LeftBinaryOp, '||')
parser.infixR(50, LeftTernaryOp, '?')
# Right associative: a = b = 2 is a = (b = 2)
parser.infixR(
30, LeftAssignOp, [
'=', '+=', '-=', '*=', '/=', '%=', '<<=', '>>=', '&=', '^=', '|='])
parser.infixL(COMMA_PREC, LeftComma, ',')
# 0 precedence -- doesn't bind until )
parser.prefix(0, NullParen, '(') # for grouping
# 0 precedence -- never used
parser.nilfix(0, NullLiteral, ['ID', 'NUMBER'])
parser.nilfix(0, NullError, [')', ']', ':', 'eof'])
return parser
def main(args):
parser = cexp_parser()
for s in args[1:]:
try:
exp = parser.parse(s)
print('{} -> {}'.format(s, exp))
except RuntimeError as run_error:
print('Unable to parse {}: {}'.format(s, run_error))
if __name__ == '__main__':
    main(sys.argv)
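# Example invocation (a sketch; the parse-tree rendering on the right-hand side
# of the arrow depends on how CompositeNode from the tree module formats itself,
# which is not shown here):
#
#   $ python pratt_tdop_parser.py '1 + 2 * 3' 'a = b ? c : d'
#   1 + 2 * 3 -> <parse tree for (1 + (2 * 3))>
#   a = b ? c : d -> <parse tree for (a = (b ? c : d))>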
import pygame
from word import Word
from hangman import Hangman
from config import *
from button import Button
from bar import Bar
class Game:
def __init__(self):
#Current displaying window booleans
self.menu = True
self.settings = False
self.playing = False
self.help = False
self.pre_play = False
#Game logic
self.player_text = "" # current input from player
self.used_letters = []
self.over = False
self.streak = 0
with open("../assets/stats/balance.txt", "r") as input_file:
self.balance = int(input_file.readlines()[0])
#Window and display
self.width = SCREEN_WIDTH
self.height = SCREEN_HEIGHT
self.volume_sfx = 0.5
self.volume_music = 0.5
self.dark_theme = True
self.language = "english"
self.key_words = EN_DIC # holds all key words depending on the active language
self.theme = "all"
self.themes = ["all", "animals", "capitals", "countries", "hardw"]
self.difficulty = "normal"
self.difficulties = ["easy", "normal", "hard"]
self.images = {}
#Buttons and bars
self.play_button = Button("play", [200, 400], LETTER_SIZE)
self.start_button = Button("start", [200, 500], LETTER_SIZE)
self.return_button = Button(" <", [0, 0], LETTER_SIZE)
self.restart_button = Button("../assets/images/restart.png", [50, 5], LETTER_SIZE, True)
self.settings_button = Button("settings", [200, 450], LETTER_SIZE)
self.help_button = Button("help", [200, 500], LETTER_SIZE)
self.pt_button = Button("PT", [self.width - 100, 200], LETTER_SIZE2)
self.en_button = Button("EN", [self.width - 200, 200], LETTER_SIZE2)
self.theme_button = Button("ON", [self.width - 100, 300], LETTER_SIZE2)
self.right_button1 = Button(">", [0, 0], LETTER_SIZE)
self.right_button2 = Button(">", [0, 0], LETTER_SIZE)
self.left_button1 = Button("<", [0, 0], LETTER_SIZE)
self.left_button2 = Button("<", [0, 0], LETTER_SIZE)
self.aid_button = Button("?", [0, 0], LETTER_SIZE)
self.sfx_bar = Bar([400, 500], 150, 0.5)
self.music_bar = Bar([400, 400], 150, 0.5)
self.buttons = [self.play_button, self.return_button, self.restart_button,
self.settings_button, self.help_button, self.pt_button,
self.en_button, self.start_button, self.right_button1,
self.right_button2, self.left_button1, self.left_button2,
self.aid_button]
self.en_button.press()
#Sounds
pygame.mixer.init()
self.music_playing = False
self.winning_sound = pygame.mixer.Sound("../assets/sounds/win.mp3")
self.lose_sound = pygame.mixer.Sound("../assets/sounds/lose.mp3")
pygame.mixer.music.load("../assets/sounds/menu.mp3")
self.game_over_played = False
def start(self):
'''Starts the game.'''
self.word = Word(self.theme, self.language, self.themes)
self.menu = False
self.playing = True
self.hangman = Hangman()
self.over = False
self.game_over_played = False
if self.difficulty == "easy":
for _ in range(int(self.word.length / 5)):
self.word.solve_letter()
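# e.g. a 12-letter word starts with int(12 / 5) == 2 letters already revealed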
#print(self.word.letters) #print the solution
def update_buttons(self):
'''Changes all the buttons display to the current language (if needed).'''
self.buttons = [self.play_button, self.return_button, self.restart_button,
self.settings_button, self.help_button, self.pt_button,
self.en_button, self.start_button, self.right_button1,
self.right_button2, self.left_button1, self.left_button2,
self.aid_button]
for i in self.buttons:
i.set_text(self.key_words[i.text], self.dark_theme)
i.set_volume(self.volume_sfx)
def write_stats(self):
'''Updates the current balance to memory.'''
with open("../assets/stats/balance.txt", "w") as input_file:
input_file.write(str(self.balance))
def get_images(self):
'''Updates the images according to the selected theme.'''
suffix = "" if self.dark_theme else "_light"
image_names = ["menu", "help_english", "help_portuguese"] + [
    "hangman%d" % i for i in range(1, 9)]
for name in image_names:
    key = name[len("hangman"):] if name.startswith("hangman") else name
    self.images[key] = pygame.image.load(
        "../assets/images/%s%s.png" % (name, suffix))
def render_menu(self, win):
'''Renders the menu tab.
@win - The game window.
'''
menu_img = self.images["menu"]
win.blit(menu_img, (self.width / 2 - 300, self.height / 2 - 300))
# menu play button
self.play_button.center(self.width)
self.play_button.set_y(self.height / 2 + 100)
self.play_button.render(win)
# menu settings button
self.settings_button.center(self.width)
self.settings_button.set_y(self.height / 2 + 150)
self.settings_button.render(win)
# menu help button
self.help_button.center(self.width)
self.help_button.set_y(self.height / 2 + 200)
self.help_button.render(win)
def render_help(self, win):
'''Renders the help tab.
@win - The game window.
'''
font = pygame.font.Font(FONT_NAME, LETTER_SIZE)
image = self.images["help_%s" % (str(self.language))]
win.blit(image, (self.width / 2 - 300, self.height / 2 - 400))
self.return_button.render(win)
# Render the tab title
text = font.render(self.key_words["help"], True, WHITE)
win.blit(text, (self.width / 2 - text.get_width() / 2, 0))
def render_settings(self, win):
'''Renders the settings tab.
@win - The game window.
'''
font = pygame.font.Font(FONT_NAME, LETTER_SIZE)
font2 = pygame.font.Font(FONT_NAME, LETTER_SIZE2)
# Render the tab title
if self.dark_theme: text = font.render(self.key_words["settings"], True, WHITE)
else: text = font.render(self.key_words["settings"], True, BLACK)
win.blit(text, (self.width / 2 - text.get_width() / 2, 0))
# Render the language options
if self.dark_theme: text = font2.render(self.key_words["language"], True, WHITE)
else: text = font2.render(self.key_words["language"], True, BLACK)
win.blit(text, (50, 200))
self.pt_button.allign_right(50, self.width)
self.pt_button.render(win)
self.en_button.set_x(self.pt_button.coords[0] - 100)
self.en_button.render(win)
if self.dark_theme: text = font2.render(self.key_words["dark mode"], True, WHITE)
else: text = font2.render(self.key_words["dark mode"], True, BLACK)
win.blit(text, (50, 300))
self.theme_button.allign_right(50, self.width)
self.theme_button.render(win)
if self.dark_theme: text = font2.render(self.key_words["music"], True, WHITE)
else: text = font2.render(self.key_words["music"], True, BLACK)
win.blit(text, (50, 400))
self.music_bar.allign_right(50, self.width)
self.music_bar.render(win, self.dark_theme)
if self.dark_theme: text = font2.render(self.key_words["sfx"], True, WHITE)
else: text = font2.render(self.key_words["sfx"], True, BLACK)
win.blit(text, (50, 500))
self.sfx_bar.allign_right(50, self.width)
self.sfx_bar.render(win, self.dark_theme)
def render_playing(self, win):
'''Renders the playing tab.
@win - The game window.
'''
font = pygame.font.Font(FONT_NAME, LETTER_SIZE)
#####Render the alphabet in the bottom#####
pos_x = self.width / 2 - LETTER_SIZE * (len(ALPHABET) / 4) + 20
# the extra 20 px are added because each letter only fills half of its
# allotted width, which would otherwise leave empty space on the right
pos_y = self.height - 100
for i in ALPHABET:
if self.dark_theme: text = font.render(i, 1, WHITE)
else: text = font.render(i, 1, BLACK)
if i in self.word.used_letters or i in self.word.filled_letters:
if self.dark_theme: text = font.render(i, 1, BLACK)
else: text = font.render(i, 1, WHITE)
win.blit(text, (pos_x - (text.get_width() / 2), pos_y))
pos_x += LETTER_SIZE
if i == 'm':
pos_y += LETTER_SIZE + 1
pos_x = self.width / 2 - \
LETTER_SIZE * (len(ALPHABET) / 4) + 20
######Draw the hangman#####
self.hangman.draw(win, self.images, self.width)
#####Draw the playing word#####
self.word.draw(win, self.dark_theme, self.width, self.height)
#####Display game over messages#####
if self.hangman.state == 8:
if self.dark_theme: text = font.render(self.key_words["lost"], True, WHITE)
else: text = font.render(self.key_words["lost"], True, BLACK)
win.blit(text, (self.width / 2 - text.get_width() / 2, self.height / 2 + 60))
elif not '_' in self.word.filled_letters:
if self.dark_theme: text = font.render(self.key_words["won"], 1, WHITE)
else: text = font.render(self.key_words["won"], 1, BLACK)
win.blit(text, (self.width / 2 - text.get_width() / 2, self.height / 2 + 60))
#####Render buttons#####
self.restart_button.render(win)
self.aid_button.allign_right(20, self.width)
self.aid_button.set_y(self.height / 2 - 160)
self.aid_button.render(win)
#####Render the streak#####
if self.streak > 0:
if self.dark_theme: text = font.render(str(self.streak), True, WHITE)
else: text = font.render(str(self.streak), True, BLACK)
win.blit(text, (20, self.height / 2 - 160))
#####Render the balance#####
if self.dark_theme: text = font.render(str(self.balance), True, WHITE)
else: text = font.render(str(self.balance), True, BLACK)
win.blit(text, (self.width - len(str(self.balance)) * 20 - 20, 0))
def render_pre_play(self, win):
'''Renders the pre_play tab.
@win - The game window.
'''
font = pygame.font.Font(FONT_NAME, LETTER_SIZE)
font2 = pygame.font.Font(FONT_NAME, LETTER_SIZE2)
# Render the theme options
if self.dark_theme: text = font.render(self.key_words["theme"], True, WHITE)
else: text = font.render(self.key_words["theme"], True, BLACK)
win.blit(text, (self.width / 2 - text.get_width() / 2, self.height / 2 - 200))
if self.dark_theme: text = font2.render(self.key_words[self.theme], True, WHITE)
else: text = font2.render(self.key_words[self.theme], True, BLACK)
win.blit(text, (self.width / 2 - text.get_width() / 2, self.height / 2 - 140))
self.right_button1.set_x(self.width / 2 + 150)
self.right_button1.set_y(self.height / 2 - 140)
self.right_button1.render(win)
self.left_button1.set_x(self.width / 2 - 150)
self.left_button1.set_y(self.height / 2 - 140)
self.left_button1.render(win)
# Render the difficulty options
if self.dark_theme: text = font.render(self.key_words["difficulty"], True, WHITE)
else: text = font.render(self.key_words["difficulty"], True, BLACK)
win.blit(text, (self.width / 2 - text.get_width() / 2, self.height / 2 - 40))
if self.dark_theme: text = font2.render(self.key_words[self.difficulty], True, WHITE)
else: text = font2.render(self.key_words[self.difficulty], True, BLACK)
win.blit(text, (self.width / 2 - text.get_width() / 2, self.height / 2 + 20))
self.right_button2.set_x(self.width / 2 + 150)
self.right_button2.set_y(self.height / 2 + 20)
self.right_button2.render(win)
self.left_button2.set_x(self.width / 2 - 150)
self.left_button2.set_y(self.height / 2 + 20)
self.left_button2.render(win)
self.start_button.center(self.width)
self.start_button.set_y(self.height - 100)
self.start_button.render(win)
def render(self, win):
'''Renders the current tab.
@win - The game window.
'''
self.width = pygame.display.get_surface().get_width()
self.height = pygame.display.get_surface().get_height()
self.update_buttons()
self.get_images()
win.fill(BLACK)
if not self.dark_theme: win.fill(WHITE)
if self.menu:
self.render_menu(win)
elif self.help:
self.render_help(win)
else:
win.fill(BLACK)
if not self.dark_theme: win.fill(WHITE)
self.return_button.render(win)
if self.settings:
self.render_settings(win)
elif self.playing:
self.render_playing(win)
elif self.pre_play:
self.render_pre_play(win)
pygame.display.update()
def handle_envents(self):
'''Handles key and mouse presses.
@return - False if the exit button was pressed else True.
'''
mouse = pygame.mouse.get_pos()
# print(mouse)
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.write_stats()
return False
elif event.type == pygame.VIDEORESIZE:
width, height = event.size
if width < 600:
width = 600
if height < 600:
height = 600
win = pygame.display.set_mode((width,height), pygame.RESIZABLE)
# Copyright 2015-2018 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import mock
import pytest
from boto3.exceptions import Boto3Error
from ruamel.yaml import YAML
from paasta_tools.cli.cmds.spark_run import configure_and_run_docker_container
from paasta_tools.cli.cmds.spark_run import create_spark_config_str
from paasta_tools.cli.cmds.spark_run import emit_resource_requirements
from paasta_tools.cli.cmds.spark_run import get_docker_cmd
from paasta_tools.cli.cmds.spark_run import get_docker_run_cmd
from paasta_tools.cli.cmds.spark_run import get_smart_paasta_instance_name
from paasta_tools.cli.cmds.spark_run import get_spark_app_name
from paasta_tools.cli.cmds.spark_run import get_spark_config
from paasta_tools.cli.cmds.spark_run import sanitize_container_name
from paasta_tools.utils import InstanceConfig
from paasta_tools.utils import SystemPaastaConfig
@mock.patch("paasta_tools.cli.cmds.spark_run.os.geteuid", autospec=True)
@mock.patch("paasta_tools.cli.cmds.spark_run.os.getegid", autospec=True)
def test_get_docker_run_cmd(mock_getegid, mock_geteuid):
mock_geteuid.return_value = 1234
mock_getegid.return_value = 100
container_name = "fake_name"
volumes = ["v1:v1:rw", "v2:v2:rw"]
env = {"k1": "v1", "k2": "v2"}
docker_img = "fake-registry/fake-service"
docker_cmd = "pyspark"
nvidia = False
actual = get_docker_run_cmd(
container_name, volumes, env, docker_img, docker_cmd, nvidia
)
assert actual[5:] == [
"--user=1234:100",
"--name=fake_name",
"--env",
"k1=v1",
"--env",
"k2=v2",
"--volume=v1:v1:rw",
"--volume=v2:v2:rw",
"fake-registry/fake-service",
"sh",
"-c",
"pyspark",
{},
]
@pytest.mark.parametrize(
"container_name,expected",
[
# name should always start with [a-zA-Z0-9]
("~!.abcde", "abcde"),
# names with unsupported chars will have them replaced with _
("to~be?or not to be!", "to_be_or_not_to_be_"),
],
)
def test_sanitize_container_name(container_name, expected):
assert sanitize_container_name(container_name) == expected
@pytest.mark.parametrize("mrjob", [True, False])
def test_get_smart_paasta_instance_name(mrjob):
args = argparse.Namespace(
instance="foo", cmd="USER blah spark-submit blah blah blah", mrjob=mrjob,
)
with mock.patch(
"paasta_tools.cli.cmds.spark_run.get_username",
return_value="root",
autospec=True,
):
assert get_smart_paasta_instance_name(args) == (
    "foo_root_mrjob" if mrjob else "foo_root_spark-submit"
)
def test_get_smart_paasta_instance_name_tron():
args = argparse.Namespace(
instance="foo", cmd="spark-submit blah blah blah", mrjob=True,
)
with mock.patch(
"paasta_tools.cli.cmds.spark_run.os.environ",
dict(
TRON_JOB_NAMESPACE="master",
TRON_JOB_NAME="yelp-main",
TRON_ACTION="rm_rf_slash",
),
autospec=None,
):
assert get_smart_paasta_instance_name(args) == "yelp-main.rm_rf_slash"
class TestGetSparkConfig:
dev_account_id = "12345"
dev_log_dir = "s3a://dev/log/path"
other_account_id = "23456"
other_log_dir = "s3a://other/log/path"
unrecognized_account_id = "34567"
@pytest.fixture(autouse=True)
def mock_find_mesos_leader(self):
with mock.patch(
"paasta_tools.cli.cmds.spark_run.find_mesos_leader", autospec=True
) as m:
m.return_value = "fake_leader:5050"
yield m
@pytest.fixture(autouse=True)
def mock_account_id(self):
with mock.patch("boto3.client", autospec=True) as m:
mock_account_id = m.return_value.get_caller_identity.return_value.get
mock_account_id.return_value = self.dev_account_id
yield mock_account_id
@pytest.fixture(autouse=True)
def mock_spark_run_config(self, tmpdir):
spark_run_file = str(tmpdir.join("spark_config.yaml"))
spark_run_conf = {
"environments": {
"dev": {
"account_id": self.dev_account_id,
"default_event_log_dir": self.dev_log_dir,
},
"test_dev": {
"account_id": self.other_account_id,
"default_event_log_dir": self.other_log_dir,
},
}
}
with open(spark_run_file, "w") as fp:
YAML().dump(spark_run_conf, fp)
yield spark_run_file
@pytest.fixture(autouse=True)
def mock_load_mesos_secret_for_spark(self):
with mock.patch(
"paasta_tools.cli.cmds.spark_run.load_mesos_secret_for_spark", autospec=True
) as m:
yield m
def get_spark_config(self, paasta_args=None):
paasta_args = paasta_args or {}
args = mock.MagicMock()
for k, v in paasta_args.items():
setattr(args, k, v)
if "cluster" not in paasta_args:
args.cluster = "fake_cluster"
return get_spark_config(
args=args,
spark_app_name="fake_name",
spark_ui_port=123,
docker_img="fake-registry/fake-service",
system_paasta_config=SystemPaastaConfig(
{"cluster_fqdn_format": "paasta-{cluster:s}.something"}, "fake_dir"
),
volumes=["v1:v1:rw", "v2:v2:rw"],
access_key="test_access_key",
secret_key="test_secret_key",
session_token=None,
)
def test_find_master(self):
spark_conf = self.get_spark_config()
assert spark_conf["spark.master"] == "mesos://fake_leader:5050"
assert "spark.master=mesos://fake_leader:5050" in create_spark_config_str(
spark_conf, is_mrjob=False
)
@pytest.mark.parametrize(
"spark_args,expected_partitions",
[
("spark.cores.max=10", 20),
("spark.cores.max=10 spark.sql.shuffle.partitions=14", 14),
],
)
def test_default_shuffle_partitions(self, spark_args, expected_partitions):
spark_conf = self.get_spark_config({"spark_args": spark_args})
assert int(spark_conf["spark.sql.shuffle.partitions"]) == expected_partitions
@pytest.mark.parametrize(
"default_event_log_dir,spark_args,expected_spark_config",
[
# do not override if user disabled the eventlog
(
dev_log_dir,
"spark.eventLog.enabled=false",
{"spark.eventLog.enabled": "false"},
),
# do not override if user manually configure logDir
(
dev_log_dir,
"spark.eventLog.dir=s3a://fake-location",
{
"spark.eventLog.enabled": "true",
"spark.eventLog.dir": "s3a://fake-location",
},
),
# use specified dir if account id is found in config
(
dev_log_dir,
None,
{"spark.eventLog.enabled": "true", "spark.eventLog.dir": dev_log_dir},
),
# do not enable if no default_event_log_dir found in config
(None, None, {"spark.eventLog.enabled": None, "spark.eventLog.dir": None}),
],
)
def test_event_logging(
self, default_event_log_dir, spark_args, expected_spark_config
):
args = {}
if spark_args is not None:
args["spark_args"] = spark_args
with mock.patch(
"paasta_tools.cli.cmds.spark_run.get_default_event_log_dir", autospec=True
) as m:
m.return_value = default_event_log_dir
spark_conf = self.get_spark_config(args)
for key, expected_value in expected_spark_config.items():
if expected_value is None:
assert key not in spark_conf
else:
assert spark_conf[key] == expected_value
@mock.patch("paasta_tools.cli.cmds.spark_run.get_aws_credentials", autospec=True)
@mock.patch("paasta_tools.cli.cmds.spark_run.os.path.exists", autospec=True)
@mock.patch("paasta_tools.cli.cmds.spark_run.pick_random_port", autospec=True)
@mock.patch("paasta_tools.cli.cmds.spark_run.get_username", autospec=True)
@mock.patch("paasta_tools.cli.cmds.spark_run.get_spark_config", autospec=True)
@mock.patch("paasta_tools.cli.cmds.spark_run.run_docker_container", autospec=True)
@mock.patch("time.time", autospec=True)
class TestConfigureAndRunDockerContainer:
instance_config = InstanceConfig(
cluster="fake_cluster",
instance="fake_instance",
service="fake_service",
config_dict={
"extra_volumes": [{"hostPath": "/h1", "containerPath": "/c1", "mode": "RO"}]
},
branch_dict={"docker_image": "fake_service:fake_sha"},
)
system_paasta_config = SystemPaastaConfig(
{"volumes": [{"hostPath": "/h2", "containerPath": "/c2", "mode": "RO"}]},
"fake_dir",
)
@pytest.fixture
def mock_create_spark_config_str(self):
with mock.patch(
"paasta_tools.cli.cmds.spark_run.create_spark_config_str", autospec=True
) as _mock_create_spark_config_str:
yield _mock_create_spark_config_str
def test_configure_and_run_docker_container(
self,
mock_time,
mock_run_docker_container,
mock_get_spark_config,
mock_get_username,
mock_pick_random_port,
mock_os_path_exists,
mock_get_aws_credentials,
):
mock_pick_random_port.return_value = 123
mock_get_username.return_value = "fake_user"
mock_get_spark_config.return_value = {"spark.app.name": "fake_app"}
mock_run_docker_container.return_value = 0
mock_get_aws_credentials.return_value = ("id", "secret", "token")
args = mock.MagicMock()
args.aws_region = "fake_region"
args.cluster = "fake_cluster"
args.cmd = "pyspark"
args.work_dir = "/fake_dir:/spark_driver"
args.dry_run = True
args.mrjob = False
args.nvidia = False
with mock.patch(
"paasta_tools.utils.get_service_docker_registry",
autospec=True,
return_value="fake-registry",
):
retcode = configure_and_run_docker_container(
args=args,
docker_img="fake-registry/fake-service",
instance_config=self.instance_config,
system_paasta_config=self.system_paasta_config,
)
assert retcode == 0
mock_run_docker_container.assert_called_once_with(
container_name="paasta_spark_run_fake_user_123",
volumes=[
"/h1:/c1:ro",
"/h2:/c2:ro",
"/fake_dir:/spark_driver:rw",
"/etc/passwd:/etc/passwd:ro",
"/etc/group:/etc/group:ro",
"/nail/home:/nail/home:rw",
],
environment={
"PAASTA_SERVICE": "fake_service",
"PAASTA_INSTANCE": "fake_instance",
"PAASTA_CLUSTER": "fake_cluster",
"PAASTA_INSTANCE_TYPE": "spark",
"PAASTA_DEPLOY_GROUP": "fake_cluster.fake_instance",
"PAASTA_DOCKER_IMAGE": "fake_service:fake_sha",
"PAASTA_LAUNCHED_BY": mock.ANY,
"AWS_ACCESS_KEY_ID": "id",
"AWS_SECRET_ACCESS_KEY": "secret",
"AWS_DEFAULT_REGION": "fake_region",
"SPARK_USER": "root",
"SPARK_OPTS": "--conf spark.app.name=fake_app",
"PAASTA_RESOURCE_CPUS": "1",
"PAASTA_RESOURCE_DISK": "1024",
"PAASTA_RESOURCE_MEM": "4096",
"PAASTA_GIT_SHA": "fake_ser",
},
docker_img="fake-registry/fake-service",
docker_cmd="pyspark --conf spark.app.name=fake_app",
dry_run=True,
nvidia=False,
)
def test_configure_and_run_docker_container_nvidia(
self,
mock_time,
mock_run_docker_container,
mock_get_spark_config,
mock_get_username,
mock_pick_random_port,
mock_os_path_exists,
mock_get_aws_credentials,
):
mock_get_aws_credentials.return_value = ("id", "secret", "token")
with mock.patch(
"paasta_tools.cli.cmds.spark_run.emit_resource_requirements", autospec=True
) as mock_emit_resource_requirements, mock.patch(
"paasta_tools.cli.cmds.spark_run.clusterman_metrics", autospec=True
):
mock_get_spark_config.return_value = {
"spark.cores.max": 5,
"spark.master": "mesos://spark.master",
}
args = mock.MagicMock(cmd="pyspark", nvidia=True)
configure_and_run_docker_container(
args=args,
docker_img="fake-registry/fake-service",
instance_config=self.instance_config,
system_paasta_config=self.system_paasta_config,
)
args, kwargs = mock_run_docker_container.call_args
assert kwargs["nvidia"]
assert mock_emit_resource_requirements.called
def test_configure_and_run_docker_container_mrjob(
self,
mock_time,
mock_run_docker_container,
mock_get_spark_config,
mock_get_username,
mock_pick_random_port,
mock_os_path_exists,
mock_get_aws_credentials,
):
mock_get_aws_credentials.return_value = ("id", "secret", "token")
with mock.patch(
"paasta_tools.cli.cmds.spark_run.emit_resource_requirements", autospec=True
) as mock_emit_resource_requirements, mock.patch(
"paasta_tools.cli.cmds.spark_run.clusterman_metrics", autospec=True
):
mock_get_spark_config.return_value = {
"spark.cores.max": 5,
"spark.master": "mesos://spark.master",
}
args = mock.MagicMock(cmd="python mrjob_wrapper.py", mrjob=True)
configure_and_run_docker_container(
args=args,
docker_img="fake-registry/fake-service",
instance_config=self.instance_config,
system_paasta_config=self.system_paasta_config,
)
args, kwargs = mock_run_docker_container.call_args
assert kwargs["docker_cmd"] == (
"python mrjob_wrapper.py --spark-master=mesos://spark.master --jobconf spark.cores.max=5"
)
assert mock_emit_resource_requirements.called
def test_suppress_clusterman_metrics_errors(
self,
mock_time,
mock_run_docker_container,
mock_get_spark_config,
mock_get_username,
mock_pick_random_port,
mock_os_path_exists,
mock_get_aws_credentials,
mock_create_spark_config_str,
):
mock_get_aws_credentials.return_value = ("id", "secret", "token")
with mock.patch(
"paasta_tools.cli.cmds.spark_run.emit_resource_requirements", autospec=True
) as mock_emit_resource_requirements, mock.patch(
"paasta_tools.cli.cmds.spark_run.clusterman_metrics", autospec=True
):
mock_emit_resource_requirements.side_effect = Boto3Error
mock_create_spark_config_str.return_value = "--conf spark.cores.max=5"
args = mock.MagicMock(
suppress_clusterman_metrics_errors=False, cmd="pyspark"
)
with pytest.raises(Boto3Error):
configure_and_run_docker_container(
args=args,
docker_img="fake-registry/fake-service",
instance_config=self.instance_config,
system_paasta_config=self.system_paasta_config,
)
# make sure we don't blow up when this setting is True
args.suppress_clusterman_metrics_errors = True
configure_and_run_docker_container(
args=args,
docker_img="fake-registry/fake-service",
instance_config=self.instance_config,
system_paasta_config=self.system_paasta_config,
)
def test_dont_emit_metrics_for_inappropriate_commands(
self,
mock_time,
mock_run_docker_container,
mock_get_spark_config,
mock_get_username,
mock_pick_random_port,
mock_os_path_exists,
mock_get_aws_credentials,
mock_create_spark_config_str,
):
mock_get_aws_credentials.return_value = ("id", "secret", "token")
with mock.patch(
"paasta_tools.cli.cmds.spark_run.emit_resource_requirements", autospec=True
) as mock_emit_resource_requirements, mock.patch(
"paasta_tools.cli.cmds.spark_run.clusterman_metrics", autospec=True
):
mock_create_spark_config_str.return_value = "--conf spark.cores.max=5"
args = mock.MagicMock(cmd="bash", mrjob=False)
configure_and_run_docker_container(
args=args,
docker_img="fake-registry/fake-service",
instance_config=self.instance_config,
system_paasta_config=self.system_paasta_config,
)
assert not mock_emit_resource_requirements.called
def test_emit_resource_requirements(tmpdir):
spark_config_dict = {
"spark.executor.cores": "2",
"spark.cores.max": "4",
"spark.executor.memory": "4g",
"spark.mesos.executor.memoryOverhead": "555",
"spark.app.name": "paasta_spark_run_johndoe_2_3",
"spark.mesos.constraints": "pool:cool-pool\\;other:value",
}
clusterman_yaml_contents = {
"clusters": {"anywhere-prod": {"aws_region": "us-north-14"}}
}
clusterman_yaml_file_path = tmpdir.join("fake_clusterman.yaml")
with open(clusterman_yaml_file_path, "w") as f:
YAML().dump(clusterman_yaml_contents, f)
with mock.patch(
"paasta_tools.cli.cmds.spark_run.get_clusterman_metrics", autospec=True
), mock.patch(
"paasta_tools.cli.cmds.spark_run.clusterman_metrics", autospec=True
) as mock_clusterman_metrics, mock.patch(
"paasta_tools.cli.cmds.spark_run.CLUSTERMAN_YAML_FILE_PATH",
clusterman_yaml_file_path,
autospec=None, # we're replacing this name, so we can't autospec
), mock.patch(
"time.time", return_value=1234, autospec=True
), mock.patch(
"paasta_tools.cli.cmds.spark_run.get_spark_resource_requirements",
autospec=True,
) as mock_spark_requirements:
metric_key_template = "requested_{resource}|framework_name=paasta_spark_run_johndoe_2_3,webui_url=http://spark.yelp"
expected_memory_request = (4 * 1024 + 555) * 2
mock_spark_requirements.return_value = {
"cpus": (metric_key_template.format(resource="cpus"), 4),
"mem": (
metric_key_template.format(resource="mem"),
expected_memory_request,
),
"disk": (
metric_key_template.format(resource="disk"),
expected_memory_request,
),
}
emit_resource_requirements(
spark_config_dict, "anywhere-prod", "http://spark.yelp"
)
mock_clusterman_metrics.ClustermanMetricsBotoClient.assert_called_once_with(
region_name="us-north-14", app_identifier="cool-pool"
)
metrics_writer = (
mock_clusterman_metrics.ClustermanMetricsBotoClient.return_value.get_writer.return_value.__enter__.return_value
)
metrics_writer.send.assert_has_calls(
[
mock.call((metric_key_template.format(resource="cpus"), 1234, 4)),
mock.call(
(
metric_key_template.format(resource="mem"),
1234,
expected_memory_request,
)
),
mock.call(
(
metric_key_template.format(resource="disk"),
1234,
expected_memory_request,
)
),
],
any_order=True,
)
@pytest.mark.parametrize(
"cmd,expected_name",
[
# spark-submit uses the script name and appends the user name and port
(
"spark-submit path/to/my-script.py --some-configs a.py",
"paasta_my-script_fake_user_1234",
),
# spark-submit with env settings
(
"USER=TEST spark-submit path/to/my-script.py --some-configs a.py",
"paasta_my-script_fake_user_1234",
),
# spark-submit that cannot find a .py script uses the default name
# with user name and port
("spark-submit path/to/my-script.jar", "paasta_spark_run_fake_user_1234"),
# other non jupyter-lab cmds use the default name and append user name and port
("pyspark", "paasta_spark_run_fake_user_1234",),
# jupyter-lab gets a different name
("jupyter-lab", "paasta_jupyter_fake_user_1234"),
],
)
def test_get_spark_app_name(cmd, expected_name):
spark_ui_port = 1234
with mock.patch("paasta_tools.cli.cmds.spark_run.get_username", autospec=True) as m:
m.return_value = "fake_user"
assert get_spark_app_name(cmd, spark_ui_port) == expected_name
def test_get_docker_cmd_add_spark_conf_str():
args = mock.Mock(cmd="pyspark -v", mrjob=False)
instance_config = None
spark_conf_str = "--conf spark.app.name=fake_app"
docker_cmd = get_docker_cmd(args, instance_config, spark_conf_str)
assert docker_cmd == "pyspark --conf spark.app.name=fake_app -v"
def test_get_docker_cmd_other_cmd():
args = mock.Mock(cmd="bash", mrjob=False)
instance_config = None
spark_conf_str = "--conf spark.app.name=fake_app"
assert get_docker_cmd(args, instance_config, spark_conf_str) == "bash"
def test_get_docker_cmd_mrjob():
args = mock.Mock(cmd="python mrjob_wrapper.py", mrjob=True)
instance_config = None
spark_conf_str = "--jobconf spark.app.name=fake_app"
expected_cmd = "python mrjob_wrapper.py --jobconf spark.app.name=fake_app"
assert get_docker_cmd(args, instance_config, spark_conf_str) == expected_cmd
Attribute(
"Use scope as template / content data?",
type="boolean",
required=False,
default=True,
)
def extend_context(self, context):
"""Hook to extend the context."""
@wrap_element_error
def run(self, context):
(content, templates, requires, withscope) = self.get_parameters(
context, "content", "template", "requires", "withscope"
)
if self.has_parameter("requires"):
if not requires:
raise logic.EndLogic(http.RespondForbidden())
self.extend_context(context)
yield logic.DeferNodeContents(self)
if "_return" in context:
scope = get_return(context.get("_return"))
else:
if withscope:
scope = context[".call"]
else:
scope = {}
if scope is not None and not isinstance(scope, Content):
app = self.get_app(context)
template = self.resolve_templates(app, templates)
# if content is None and self.younger_sibling.check_type(namespaces.default, 'content'):
# content = self.younger_sibling
if content is not None:
if not hasattr(scope, "items"):
self.throw(
"view.bad-return",
"View should return a dict or other mapping object (not {})".format(
to_expression(scope)
),
)
for defer in self.generate_content(context, content, app, td=scope):
yield defer
context.copy("_content", "_return")
elif template is not None:
render_container = RenderContainer.create(app, template=template)
render_container.update(scope)
context["_return"] = render_container
class AppUrlsProxy(object):
def __moyacontext__(self, context):
urls = context.get(".urls")
app = context[".app"]
return urls[app.name]
class Trace(object):
def __init__(self, target, app=None, route_data=None, response=None):
self.target = target
self.app = app
self.route_data = route_data
if isinstance(response, http.RespondWith):
self.response = text_type(response)
else:
self.response = None
def __moyarepr__(self, context):
return "<trace>"
@property
def target_html(self):
return syntax.highlight("target", self.target, line_numbers=False)
class GetLocale(DataSetter):
"""Get an object containing locale information"""
class Help:
synopsis = "get locale information"
locale = Attribute("Locale name")
def logic(self, context):
_locale = self.locale(context)
try:
locale = LocaleProxy(_locale)
except:
self.throw(
"get-locale.unknown-locale",
'''Couldn't get locale information for "{}"'''.format(_locale),
)
self.set_context(context, self.dst(context), locale)
class SetLocale(LogicElement):
"""Switches the current locale"""
class Help:
synopsis = "switch the current locale"
locale = Attribute("Locale name")
def logic(self, context):
_locale = self.locale(context)
try:
locale = LocaleProxy(_locale)
except:
self.throw(
"change-locale.unknown-locale",
'''Couldn't get locale information for "{}"'''.format(_locale),
)
context[".locale"] = locale
class SetLanguage(LogicElement):
"""Set the current language"""
class Help:
synopsis = "set the current language"
language = Attribute("Language code")
def logic(self, context):
language = self.language(context)
if not isinstance(language, list):
language = [language]
context[".languages"] = language
class Server(LogicElement):
"""Defines a server"""
class Help:
synopsis = "define a server"
def post_build(self, context):
self.urlmapper = URLMapper()
self.middleware = {"request": URLMapper(), "response": URLMapper()}
self.fs = None
super(Server, self).post_build(context)
def startup(self, archive, context, fs, breakpoint=False):
self.fs = fs
archive.build_libs()
try:
if breakpoint:
logic.debug(archive, context, logic.DeferNodeContents(self))
else:
logic.run_logic(archive, context, logic.DeferNodeContents(self))
except Exception as e:
# import traceback
# traceback.print_exc(e)
raise
archive.build_libs()
def get_url(self, app_name, url_name, params=None):
app_routes = self.urlmapper.get_routes(app_name)
url = None
# Could be multiple routes for this name
# Try each one and return the url that doesn't fail
for route in app_routes[:-1]:
try:
url = route.target.get_url(url_name, params, base_route=route)
except RouteError:
continue
else:
break
else:
# Last one, if this throws an exception, we want it to propagate
route = app_routes[-1]
url = route.target.get_url(url_name, params, base_route=route)
return url
def trace(self, archive, url, method="GET"):
for route_match in self.urlmapper.iter_routes(url, method):
route_data = route_match.data
target = route_match.target
if target:
for element_ref in target:
app = archive.get_app(route_data.get("app", None))
yield (route_data, archive.get_element(element_ref, app))
def process_response(self, context, response):
cookies = context.root.get("cookiejar", {})
for cookie in itervalues(cookies):
cookie.set(response)
for cookie_name in cookies.deleted_cookies:
response.delete_cookie(cookie_name)
try:
if not response.date and "now" in context.root:
response.date = context.root["now"]._dt
except:
# Don't want to discard the response here, so log exception
log.exception("error setting response date")
return response
def render_response(self, archive, context, obj, status=StatusCode.ok):
response = Response(
charset=py2bytes("utf8"), status=int(getattr(obj, "http_status", status))
)
result = render_object(obj, archive, context, "html")
response.text = text_type(result)
return self.process_response(context, response)
def _dispatch_result(self, archive, context, request, result, status=StatusCode.ok):
if result is None:
return None
if isinstance(result, ReplaceRequest):
return result
if isinstance(result, RespondWith):
return self.dispatch_handler(
archive, context, request, status=result.status, headers=result.headers
)
if not isinstance(result, Response):
status = int(getattr(result, "http_status", None) or status)
response = MoyaResponse(charset=py2bytes("utf8"), status=status)
html = render_object(result, archive, context, "html")
response.text = html
else:
response = result
return self.process_response(context, response)
def handle_error(self, archive, context, request, error, exc_info):
context.safe_delete("._callstack")
context.safe_delete(".call")
return self.dispatch_handler(
archive,
context,
request,
status=StatusCode.internal_error,
error=error,
exc_info=exc_info,
)
def _dispatch_mapper(
self, archive, context, mapper, url, method="GET", status=None, breakpoint=False
):
"""Loop to call targets for a url/method/status combination"""
dispatch_trace = context.root.get("_urltrace", [])
if breakpoint:
call = archive.debug_call
else:
call = archive.call
root = context.root
for route_data, target, name in mapper.iter_routes(url, method, status):
root.update(urlname=name, headers={})
if target:
for element_ref in target:
app, element = archive.get_element(element_ref)
if element:
app = app or archive.get_app(route_data.get("app", None))
context.root.update(url=route_data)
result = call(element_ref, context, app, url=route_data)
dispatch_trace.append(
Trace(element_ref, app, route_data, result)
)
if result is not None:
yield result
else:
dispatch_trace.append(Trace(element_ref))
else:
dispatch_trace.append(Trace(element_ref))
@classmethod
def set_site(cls, archive, context, request):
"""Set site data for a request"""
domain = request.host
if ":" in domain:
domain = domain.split(":", 1)[0]
site_instance = archive.sites.match(domain, context=context)
if site_instance is None:
log.error(
'no site matching domain "{domain}", consider adding [site:{domain}] to settings'.format(
domain=domain
)
)
return None
context.root["sys"]["site"] = site_instance
try:
context.root["sys"]["base"] = archive.project_fs.getsyspath("/")
except NoSysPath:
context.root["sys"]["base"] = None
context.root["site"] = site_instance._data
return site_instance
@classmethod
def _get_tz(cls, context, default_timezone="UTC", user_timezone=False):
"""lazy insertion of .tz"""
if context is None:
context = pilot.context
tz = None
if user_timezone:
tz = context.get(".user.timezone", None)
if not tz:
tz = context.get(".sys.site.timezone", None)
if not tz:
tz = default_timezone
if not tz:
return None
try:
return Timezone(tz)
except pytz.UnknownTimeZoneError:
log.error("invalid value for timezone '%s', defaulting to UTC", tz)
return Timezone("UTC")
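# Resolution order above, illustrated (a sketch with hypothetical values, not
# from the source): with user_timezone=True a ".user.timezone" of
# "Europe/Paris" wins; failing that the site timezone applies, then
# default_timezone; an unrecognised name such as "Mars/Olympus" logs an error
# and falls back to Timezone("UTC").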
def run_middleware(self, stage, archive, context, request, url, method):
middleware = self.middleware[stage]
try:
for result in self._dispatch_mapper(
archive, context, middleware, url, method
):
response = self._dispatch_result(archive, context, request, result)
if response:
return response
except Exception as e:
return self.handle_error(archive, context, request, e, sys.exc_info())
def _populate_context(self, archive, context, request):
"""Add standard values to context."""
populate_context = {
"permissions": {},
"libs": archive.libs,
"apps": archive.apps,
"debug": archive.debug,
"develop": archive.develop,
"sys": {},
"server": self,
"urls": self.urlmapper,
"now": ExpressionDateTime.moya_utcnow(),
"appurls": AppUrlsProxy(),
"moya": {"version": __version__},
"enum": archive.enum,
"accept_language": list(request.accept_language),
"media_url": archive.media_url,
"filters": archive.filters,
"secret": archive.secret,
}
context.root.update(populate_context)
set_dynamic(context)
def dispatch(self, archive, context, request, breakpoint=False):
"""Dispatch a request to the server and return a response object."""
url = request.path_info
method = request.method
self._populate_context(archive, context, request)
site = self.set_site(archive, context, request)
if site is None:
# No site match, return a 404
return self.dispatch_handler(
archive, context, request, StatusCode.not_found
)
root = context.root
if site.head_as_get and method == "HEAD":
# Treat HEAD requests as GET requests
request = request.copy()
request.method = "GET"
root["request"] = request
method = "GET"
root["locale"] = site.locale
context.set_lazy(
".tz",
self._get_tz,
None,
user_timezone=site.user_timezone,
default_timezone=site.timezone,
)
# Request middleware
response = self.run_middleware(
"request", archive, context, request, url, method
)
if response is not None:
return response
def response_middleware(response):
context.safe_delete("._callstack", ".call")
context.root["response"] = response
new_response = self.run_middleware(
"response", archive, context, request, url, method
)
return new_response or response
# Run main views
root["urltrace"] = root["_urltrace"] = []
context.safe_delete("._callstack", ".call")
response = None
try:
for result in self._dispatch_mapper(
archive, context, self.urlmapper, url, method, breakpoint=breakpoint
):
response = self._dispatch_result(archive, context, request, result)
if response:
response = response_middleware(response)
db.commit_sessions(context)
return response
else:
db.commit_sessions(context)
except Exception as e:
db.rollback_sessions(context, close=False)
return self.handle_error(archive, context, request, e, sys.exc_info())
finally:
for thread in context.get("._threads", []):
thread.wait()
context.safe_delete("._threads")
db.close_sessions(context)
root["_urltrace"] = []
# Append slash and redirect if url doesn't end in a slash
if not url.endswith("/") and site.append_slash:
# Check in advance if the url ending with / actually maps to anything
if method in ("HEAD", "GET") and self.urlmapper.has_route(
url + "/", method, None
):
_, ext = splitext(url)
# Don't redirect when the filename has an extension
if not ext:
response = MoyaResponse(
status=StatusCode.temporary_redirect, location=url + "/"
)
return response
if request.method in ["GET", "POST", "HEAD"]:
status_code = StatusCode.not_found
else:
status_code = StatusCode.method_not_allowed
# No response returned, handle 404
return self.dispatch_handler(archive, context, request, status=status_code)
def dispatch_handler(
self,
archive,
context,
request,
status=404,
headers=None,
error=None,
exc_info=None,
):
"""Respond to a status code"""
context.safe_delete(
"._callstack",
".call",
".td",
"._td",
".contentstack",
".content",
".headers",
)
if headers is not None:
context.root["headers"] = headers
moya_trace = None
error2 = None
moya_trace2 = None
if error is not None:
moya_trace = getattr(error, "moya_trace", None)
if moya_trace is None:
try:
moya_trace = trace.build(
context, None, None, error, exc_info, request
)
except Exception as e:
# import traceback; traceback.print_exc(e)
raise
try:
fecbgd fgdeac acebgfd gbe bagc afecg fbead begfa | agcbdfe bg fegba badegfc",
"eadbgc edfba bfecdg acfb egfad ba bea dfceb dcaebf beagdcf | ab eafbd abe adcfgbe",
"fbega afdce gd agcebf becgad bgdeaf fbgd gde ebgfdca efgad | cegdab cefbga fbgd geadf",
"cag agfce gfacde gbcdaf ag gead fbgec cdefa bdafcge bdefac | cfead gca adge bdagcf",
"dc cebfa egacfdb dcf edbfc cbdgfa fdebg gefdba cged fdecbg | dcf eafbc bgacdf dc",
"edbfg fdacge dcgae baegcd bacfgd ecaf geabdfc ecdfg fgc cf | defcag dbfgca ebfdcga cebdga",
"fdgec gd gfd efcbd fdagbc bedg fabedgc gaefc becadf fdbecg | gacfbd fbdcag dgbe edcbf",
"fec dgebf fbdc efbdcga afegcd befagd fdcgeb fc egcfb cgeab | bdefgc dfegba degabfc fc",
"cdb gdfc dc cbdega eadbf egfacb gfcebad cgbfda acbdf bagfc | dgaecb cd bafed dcb",
"dcbga ag cgdaef fgbdeac daebc fbag cag fgcdb ebfgcd fbcgad | ag faegcdb acgdfe dfgcba",
"dabef fa ebacfd dfa bdfec fbgdca cbfegd dbcegaf bgaed efca | gdaeb afd eadbf fa",
"bed dgab gfced afgbedc dfbge geafb efadbc febdga cafgeb db | fgeba gdba aegfcb gfecd",
"da gbacf becdaf ebcfgad ecad cgfbde fedgab dbfca fecbd abd | ad egbfdc bad fcbag",
"efc bgfceda dfbcag ecbg efagd acbgf fbagec aecfg ce bfeacd | egfac ec afecgb fbeacgd",
"aedfbc gaced fabg ab bdgfc gefbcd adb gdbac acdegbf gcfbad | ba bcgdf ebfcda bafegcd",
"cdgfb abegdc fgeacbd feba ecafgd gebfda ef efd dbfeg bdeag | dfe egfdb agefdb bedga",
"cagfb ef fcdgae cfe cgafdb bcedg fbdgcae bcegf feba acgfeb | gefdac egfacd cgebd feadbgc",
"bfgedca cabfgd gbac ebdfa ag abgfd dga egfdac bdcgf gbcefd | cafged bgadf ceafgd ag",
"gebda bc eagdbfc ebdcg cdbgfa cgb ecdgfa cefb gfdec fbgecd | dfgbac cebf dgbecf fcbgade",
"dfbcag dafgc bfd fdgcae gebda ecafbd gfadb gfcb fb dacgbef | gbdfa dagcf dagfc fbaced",
"cd dfebg fcdgb cadb gdeacf eafcbgd gfceab cdg abgcf dgbfca | dc fbadcg cd adbc",
"becafd dbfa feb fb abgcef adgefc cefda bgced edcfb bedgfac | fcdabe bf feb dfab",
"gbadfe fdcage acgef fed ed decg abgfce adcef gebfadc fabcd | ecdg afbcd afdgce fde",
"fcaedb dgbca fdcg efgbdca fbgca befag caf bagfdc dbaegc fc | fca gfacb bgceda fc",
"cabed fbed fd cfd fagecd bfdca bfcdaeg acfgb bedcaf dcgbea | aebfdc ebfd bdcaf fcdgea",
"agdbfc fagbc fgdc dgacebf bcgaef fda ceabd bdfca fbgead fd | fda fdcegab bgafc edbafg",
"cbgfa bfdgea bfdgc ga fga aecbdf gcae egbcaf bcfgdea cefab | ga fcabe fgbdc gfbac",
"cgafe cbagdfe afbce afegdb efgda gebcda gc gdcf cag eagdcf | befca gafed eadbcg gca",
"agfce edgcba egbac gcbf fagbde gdacebf agf cdaef gbeacf gf | fecag bgace gdfbaec bgcfdea",
"be gaeb gdeaf fcdab cbegfd fgeacd efdab fbe cgebfda dagebf | feb dfgacbe faedcg egfbad",
"dfcba cgfdbae gda afedcb bgca bagcfd ga gbdef eadcfg bgafd | bgfad gfebd ag efabdc",
"edgafb dgef agbedc gf abfgec fcdgeba adfbg abdeg fga cfbda | dafecbg badfg bcagdef fag",
"fgc cgfbe ebgaf cbfa cgedbfa cf bgced gecfab defbag fgdcae | gfc cgf gbdfea fcg",
"cga bcadfge aefdgc edgca dafc gdafbe aefcbg agefd ca cdegb | debcg faecgb cga gac",
"dbcaf gcadebf eafcgb dbcefa gbdf cbfdga bcg adbcg gb edacg | afecbg cbg bfcad cabfdg",
"fd dgaeb def fgecb fdebac feadbg eabdgc dfgeb becfdga gafd | fbgce egadb bdgea fagd",
"cedbgf dbcgf febcad dgeafcb efcagd bfge cfg bdcga dbcfe fg | gcf cbgda gcf cadfegb",
"gfabc agc degbaf fgaecb gaefb ca cefa bedgac fedgbca gcfbd | begdca gbdcf cag eacf",
"ebfa be fgbcd afcegd cbe febcd faced egfdcba dbface aecbgd | ceb ceb cbfadeg feab",
"gbfad cdeabgf bdeag de afcdge ade cdbe ebacg geafbc adcbeg | cbdefga gaceb ed bdce",
"fcedbag adfeg fadebg cbgda fdeb fba bf egdcfa gfadb cfbgae | bf fegad gabdf dgcba",
"abde debfgca acfegd ae fgcbe dabgc gbadfc cgeba cgeabd aeg | eag bcfgdae gfebc aegbc",
"afdeb caegfd dcgb bcafge egabfcd fgd cgfbad dgbaf fcgab gd | aebdfgc bcdg agfebcd fdabg",
"ecbafg acdbfe bfge cfaeb cedga cfega dafgbc afg gf bcfgaed | cbdfga fg efagc geacf",
"ecafbg adgfecb fdeac agbd cbgdf cfgbda agf ag bedcgf agcdf | ga ga fag cfdbg",
"gcdbe cbadefg cdf dfea df gfaebc facbed cfbae bagcdf ebfdc | afde aecfb cbfdea caedfgb",
"gcfb beafgd fdg deagc cfbed fg gacdfeb gdcbef fcebad gedfc | cdbef dgf dfg bcfdgea",
"dgebfac eacdbf bdc abgdc adbeg cadgf cbfg cegfad gcbfda cb | cafdeg fdacge facgd bcd",
"cdeag becf fgdbac fc gbfaced edgfc dgebaf gbfdec edgfb cgf | gafdeb daegc bcdfge egdfab",
"aegd cgd abfcd cabgde fdegbc dg ecagb dagfbec cbegfa badcg | ecagbfd debcgf agde bacfd",
"cfdge gb abfed bdg cbge gfedb bgfcdae cbdagf begcdf fcdega | cgbe bgd cfbgda fcedg",
"egdfca eafdcb ecdag dbgfac gefa ag bcfadge agc gcbed dacfe | ebafcdg acefd fdcae agc",
"afbgec edgb dafeg fegab agfdc de eda fbdgcea dfaecb fdegab | ead aed cfebad bceafg",
"dbgc gbeaf acfebd agd afecbgd fabdc ceagfd gbfda cbfagd dg | gcdb efdacb gfdbca gbdc",
"afcedg gbad dgecb gcefb acdbeg dg abedc dcg dabcef gbdcfea | cgdeaf beacfd cbade cgdbe",
"ea egfdba dgceaf daec cfgbe eag befgdac fegca cadfg fabgdc | fgceb adfgc abcfgde gaefcd",
"agdebfc dbgca egb egfd abfde agdfbe fcbade fgeabc dgbea ge | bdgca debaf aedgb fegacb",
"efcbd ecgd fbecgd adgbf egacbf bfcgdae ebg fdebg ge dcebfa | gfceba cbdfeg eg fgceadb",
"dc befgac cdb aced gcbfead dcgefb adfbg acgbd cabeg eacdbg | edcabg bdc dgcaeb acbgefd",
"afgbc dbaf ebcfg ba ecfgdab adcfg bca fcaegd cgabdf gcabed | bfgce egadbc cbgaf acdbefg",
"cagbd adg cegafd acfgebd bdaec gbfd gd gbafdc bgacf faegcb | gebacf dg gda agdbc",
"bc cdgba adfcg ebadg cfbdae ebfgad dgbfcea cba bgec gbdace | bc cb dafgeb cab",
"gae bfegc edfac gdca ga begdaf cgeaf edafcg bafedc cfbedga | adfcge ecdfag fabcde aecfdb",
"cdfge db eabcf fdb bade cbfed cbfdaeg gfbdca dcbefa cgfabe | dbf cbagdef db abed",
"efa facg beadcg gacfde cbfed dcefbag adceg fa gedbfa cfdea | agecd gcbaed afcde eacdg",
"dbgca dfbeg fcg adbcgf bafgce fdca cgbdf gabdcfe dbecga cf | gacefb cabdg dfca fcg",
"dbfcae gc decg ebadc cga abegf cbfaegd cbage gabfcd agdebc | edbcga gebca cebda baegdc",
"becagd gefcdba bgaef gfadcb fgd fadc bdacg dfagb fd ecdbgf | dgf fgaeb cdaf fd",
"egfcab dcfgea caebg ag cbeafd edfabgc gfba bcdeg acg ecafb | cfbaeg gac acgfed eabcg",
"gbc gfbad fgbac beadgf cg fbeac gdfbce dbgcaf bgcfead cgda | gacd edfgbc cbg gafbed",
"fgdba begcad fg gfd fdacb debfag egdba adfcbge fgbedc afeg | afdbg bdaeg badfg eagcdb",
"cefgdb fdecga beafg agd acbd badge cgfdeba cebdg ad dacbeg | cabd cedafg cfedag fgaeb",
"fgc cdefb beafcg dcfbg adbgfe cgad acefbgd adbfg cg afdgcb | fegcdab bagfecd gfdcb gcad",
"cbagfe cbedgaf bfgde cfdeab gfeba df fgda dcbeg afebgd bfd | fd adfg ebgcd fcabge",
"ebgaf ad ebacgd adfgb cadf abfdcg badgecf cdgfb bad ebcgfd | bdegcfa cfda fbaeg gbcfad",
"adfcg dabecg gaebfcd ecfbag bgacf gdc bgdf cbfagd eadcf gd | dg cfgab fcgab agbfce",
"ecfgdb dcgbfa fdage ecda ad ebfag dfa bfadecg fadecg cegfd | ad dgefc cdae bgdecf",
"agcfed eabdfcg aec ce gacbd gadec geafd cefd bagdef cgafbe | ce aefbdgc egcbfa gaecfd",
"gdace fe bdgafc efa dbafecg fgeb bdafce beadfg egdfa dgfab | ecgda fe fbgad fedabg",
"fg bdgcefa gabed defga fbeacd bdcafg cfeg gfd edcfga fdeac | agfde fg fcbade dfgea",
"dcgfbe egdfc gcbd dc geafd becdaf gcabfe cde efbcg gedbfca | dc fgead dgcef gdcb",
"gdefa efadbc cfdebga adfebg eaf ebag ea adfbg cfdeg acgfdb | fgdbaec cabgfed dgafe efa",
"fecb fcegbda dgeaf eagdbc eb dbegfc cfgbad beg gbedf fbgdc | bcef dfegb gbdfca dgcbafe",
"befg bafde afgdc degfa cdeafbg edafbc dge eagcdb gdbafe eg | dge efbad becgda aegdbf",
"ac adfgc cedfg bedgac cda ecfagd degbcf fecabdg afec badfg | gdfab dcgaf fgced cegafdb",
"cda badcg edcgb geca ca bdaegc dbfaec decgbf agfbd ecdgfab | ac bdfag ca edgbfc",
"fgac febgca fg ebacf bgfecd gfb gbcfdea badeg bfage bedfca | gbefcda fg bdfceg gcfa",
"bcgfade ce fcge dbgcfa cdafe badgec acgfd gceadf befad edc | bagcfed ecfda fcgad dce",
"ebacfg efbad fcbaedg age dfagc adefg cdge eacgfd ge dgfabc | geabfc cdfagb eafdgc gae",
"abd cgbde da gdcaeb cagd baecd faceb afdebg ceagbdf egfdbc | agdc gadc gedbcaf bcafe",
"eg bdfgec dcfbe facebg efg gabcedf dcgfe cedfab afgcd debg | ecgdf cgefbd cdgef feg",
"eagf efbdgc dga dcefg cdgfea bcdea cdgfab ga | |
break
for line in out.split('\n'):
if line:
words = line.split()
if words[0] == 'mtu':
current_if['mtu'] = words[1]
return interfaces, ips
# AIX 'ifconfig -a' does not report the MTU, so current_if['mtu'] is not set here
def parse_interface_line(self, words):
device = words[0][0:-1]
current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
current_if['flags'] = self.get_options(words[1])
current_if['macaddress'] = 'unknown' # will be overwritten later
return current_if
class OpenBSDNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the OpenBSD Network Class.
It uses the GenericBsdIfconfigNetwork.
"""
platform = 'OpenBSD'
# OpenBSD 'ifconfig -a' does not have information about aliases
def get_interfaces_info(self, ifconfig_path, ifconfig_options='-aA'):
return super(OpenBSDNetwork, self).get_interfaces_info(ifconfig_path, ifconfig_options)
# Return macaddress instead of lladdr
def parse_lladdr_line(self, words, current_if, ips):
current_if['macaddress'] = words[1]
class SunOSNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the SunOS Network Class.
It uses the GenericBsdIfconfigNetwork.
Solaris can have different FLAGS and MTU for IPv4 and IPv6 on the same interface
so these facts have been moved inside the 'ipv4' and 'ipv6' lists.
"""
platform = 'SunOS'
# Solaris 'ifconfig -a' will print interfaces twice, once for IPv4 and again for IPv6.
# MTU and FLAGS also may differ between IPv4 and IPv6 on the same interface.
# 'parse_interface_line()' checks for previously seen interfaces before defining
# 'current_if' so that IPv6 facts don't clobber IPv4 facts (or vice versa).
def get_interfaces_info(self, ifconfig_path):
interfaces = {}
current_if = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
rc, out, err = module.run_command([ifconfig_path, '-a'])
for line in out.split('\n'):
if line:
words = line.split()
if re.match(r'^\S', line) and len(words) > 3:
current_if = self.parse_interface_line(words, current_if, interfaces)
interfaces[ current_if['device'] ] = current_if
elif words[0].startswith('options='):
self.parse_options_line(words, current_if, ips)
elif words[0] == 'nd6':
self.parse_nd6_line(words, current_if, ips)
elif words[0] == 'ether':
self.parse_ether_line(words, current_if, ips)
elif words[0] == 'media:':
self.parse_media_line(words, current_if, ips)
elif words[0] == 'status:':
self.parse_status_line(words, current_if, ips)
elif words[0] == 'lladdr':
self.parse_lladdr_line(words, current_if, ips)
elif words[0] == 'inet':
self.parse_inet_line(words, current_if, ips)
elif words[0] == 'inet6':
self.parse_inet6_line(words, current_if, ips)
else:
self.parse_unknown_line(words, current_if, ips)
# 'parse_interface_line' and 'parse_inet*_line' leave two dicts in the
# ipv4/ipv6 lists which is ugly and hard to read.
# This quick hack merges the dictionaries. Purely cosmetic.
for iface in interfaces:
for v in 'ipv4', 'ipv6':
combined_facts = {}
for facts in interfaces[iface][v]:
combined_facts.update(facts)
if len(combined_facts.keys()) > 0:
interfaces[iface][v] = [combined_facts]
return interfaces, ips
def parse_interface_line(self, words, current_if, interfaces):
device = words[0][0:-1]
if device not in interfaces.keys():
current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
else:
current_if = interfaces[device]
flags = self.get_options(words[1])
v = 'ipv4'
if 'IPv6' in flags:
v = 'ipv6'
current_if[v].append({'flags': flags, 'mtu': words[3]})
current_if['macaddress'] = 'unknown' # will be overwritten later
return current_if
# Solaris displays single digit octets in MAC addresses e.g. 0:1:2:d:e:f
# Add leading zero to each octet where needed.
def parse_ether_line(self, words, current_if, ips):
macaddress = ''
for octet in words[1].split(':'):
octet = ('0' + octet)[-2:None]
macaddress += (octet + ':')
current_if['macaddress'] = macaddress[0:-1]
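# Illustrative effect of the padding above (not from the original module):
# >>> ':'.join(('0' + octet)[-2:] for octet in '0:3:ba:2f:5:1e'.split(':'))
# '00:03:ba:2f:05:1e'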
class Virtual(Facts):
"""
This is a generic Virtual subclass of Facts. This should be further
subclassed to implement per platform. If you subclass this,
you should define:
- virtualization_type
- virtualization_role
- container (e.g. solaris zones, freebsd jails, linux containers)
All subclasses MUST define platform.
"""
def __new__(cls, *arguments, **keyword):
subclass = cls
for sc in Virtual.__subclasses__():
if sc.platform == platform.system():
subclass = sc
return super(cls, subclass).__new__(subclass, *arguments, **keyword)
def __init__(self):
Facts.__init__(self)
def populate(self):
return self.facts
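# A minimal sketch of a platform-specific subclass (illustrative only; the
# class and platform string below are hypothetical). The platform attribute
# must match platform.system() for __new__ above to select the subclass:
#
# class ExampleVirtual(Virtual):
#     platform = 'ExampleOS'
#     def populate(self):
#         self.facts['virtualization_type'] = 'NA'
#         self.facts['virtualization_role'] = 'NA'
#         return self.facts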
class LinuxVirtual(Virtual):
"""
This is a Linux-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'Linux'
def __init__(self):
Virtual.__init__(self)
def populate(self):
self.get_virtual_facts()
return self.facts
# For more information, check: http://people.redhat.com/~rjones/virt-what/
def get_virtual_facts(self):
if os.path.exists("/proc/xen"):
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
try:
for line in get_file_lines('/proc/xen/capabilities'):
if "control_d" in line:
self.facts['virtualization_role'] = 'host'
except IOError:
pass
return
if os.path.exists('/proc/vz'):
self.facts['virtualization_type'] = 'openvz'
if os.path.exists('/proc/bc'):
self.facts['virtualization_role'] = 'host'
else:
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists('/proc/1/cgroup'):
for line in get_file_lines('/proc/1/cgroup'):
if re.search(r'/docker(/|-[0-9a-f]+\.scope)', line):
self.facts['virtualization_type'] = 'docker'
self.facts['virtualization_role'] = 'guest'
return
if re.search('/lxc/', line):
self.facts['virtualization_type'] = 'lxc'
self.facts['virtualization_role'] = 'guest'
return
product_name = get_file_content('/sys/devices/virtual/dmi/id/product_name')
if product_name in ['KVM', 'Bochs']:
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
return
if product_name == 'RHEV Hypervisor':
self.facts['virtualization_type'] = 'RHEV'
self.facts['virtualization_role'] = 'guest'
return
if product_name == 'VMware Virtual Platform':
self.facts['virtualization_type'] = 'VMware'
self.facts['virtualization_role'] = 'guest'
return
bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor')
if bios_vendor == 'Xen':
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
return
if bios_vendor == 'innotek GmbH':
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
return
sys_vendor = get_file_content('/sys/devices/virtual/dmi/id/sys_vendor')
# FIXME: This does also match hyperv
if sys_vendor == 'Microsoft Corporation':
self.facts['virtualization_type'] = 'VirtualPC'
self.facts['virtualization_role'] = 'guest'
return
if sys_vendor == 'Parallels Software International Inc.':
self.facts['virtualization_type'] = 'parallels'
self.facts['virtualization_role'] = 'guest'
return
if sys_vendor == 'QEMU':
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
return
if sys_vendor == 'oVirt':
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists('/proc/self/status'):
for line in get_file_lines('/proc/self/status'):
if re.match(r'^VxID: \d+', line):
self.facts['virtualization_type'] = 'linux_vserver'
if re.match('^VxID: 0', line):
self.facts['virtualization_role'] = 'host'
else:
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists('/proc/cpuinfo'):
for line in get_file_lines('/proc/cpuinfo'):
if re.match('^model name.*QEMU Virtual CPU', line):
self.facts['virtualization_type'] = 'kvm'
elif re.match('^vendor_id.*User Mode Linux', line):
self.facts['virtualization_type'] = 'uml'
elif re.match('^model name.*UML', line):
self.facts['virtualization_type'] = 'uml'
elif re.match('^vendor_id.*PowerVM Lx86', line):
self.facts['virtualization_type'] = 'powervm_lx86'
elif re.match('^vendor_id.*IBM/S390', line):
self.facts['virtualization_type'] = 'PR/SM'
lscpu = module.get_bin_path('lscpu')
if lscpu:
rc, out, err = module.run_command(["lscpu"])
if rc == 0:
for line in out.split("\n"):
data = line.split(":", 1)
key = data[0].strip()
if key == 'Hypervisor':
self.facts['virtualization_type'] = data[1].strip()
else:
self.facts['virtualization_type'] = 'ibm_systemz'
else:
continue
if self.facts['virtualization_type'] == 'PR/SM':
self.facts['virtualization_role'] = 'LPAR'
else:
self.facts['virtualization_role'] = 'guest'
return
# Beware that we can have both kvm and virtualbox running on a single system
if os.path.exists("/proc/modules") and os.access('/proc/modules', os.R_OK):
modules = []
for line in get_file_lines("/proc/modules"):
data = line.split(" ", 1)
modules.append(data[0])
if 'kvm' in modules:
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'host'
return
if 'vboxdrv' in modules:
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'host'
return
# If none of the above matches, return 'NA' for virtualization_type
# and virtualization_role. This allows for proper grouping.
self.facts['virtualization_type'] = 'NA'
self.facts['virtualization_role'] = 'NA'
return
class HPUXVirtual(Virtual):
"""
This is a HP-UX specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'HP-UX'
def __init__(self):
Virtual.__init__(self)
def populate(self):
self.get_virtual_facts()
return self.facts
def get_virtual_facts(self):
if os.path.exists('/usr/sbin/vecheck'):
rc, out, err = module.run_command("/usr/sbin/vecheck")
if rc == 0:
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HP vPar'
if os.path.exists('/opt/hpvm/bin/hpvminfo'):
rc, out, err = module.run_command("/opt/hpvm/bin/hpvminfo")
if rc == 0 and re.match('.*Running.*HPVM vPar.*', out):
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HPVM vPar'
elif rc == 0 and re.match('.*Running.*HPVM guest.*', out):
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HPVM IVM'
elif rc == 0 and re.match('.*Running.*HPVM host.*', out):
self.facts['virtualization_type'] = 'host'
self.facts['virtualization_role'] = 'HPVM'
if os.path.exists('/usr/sbin/parstatus'):
rc, out, err = module.run_command("/usr/sbin/parstatus")
if rc == 0:
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HP nPar'
class SunOSVirtual(Virtual):
"""
This is a SunOS-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
- container
"""
platform = 'SunOS'
def __init__(self):
Virtual.__init__(self)
def populate(self):
self.get_virtual_facts()
return self.facts
def get_virtual_facts(self):
rc, out, err = module.run_command("/usr/sbin/prtdiag")
for line in out.split('\n'):
if 'VMware' in line:
self.facts['virtualization_type'] = 'vmware'
self.facts['virtualization_role'] = 'guest'
if 'Parallels' in line:
self.facts['virtualization_type'] = 'parallels'
self.facts['virtualization_role'] = 'guest'
if 'VirtualBox' in line:
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
if 'HVM domU' in line:
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
# Check if it's a zone
if os.path.exists("/usr/bin/zonename"):
rc, out, err = module.run_command("/usr/bin/zonename")
if out.rstrip() != "global":
self.facts['container'] = 'zone'
# Check if it's a branded zone (i.e. Solaris 8/9 zone)
if os.path.isdir('/.SUNWnative'):
self.facts['container'] = 'zone'
# If it's a zone check if we can detect if our global zone is itself virtualized.
# Relies on the "guest tools" (e.g. vmware tools) to be installed
if 'container' in self.facts and self.facts['container'] == 'zone':
rc, out, err = module.run_command("/usr/sbin/modinfo")
for line in out.split('\n'):
if 'VMware' in line:
self.facts['virtualization_type'] = 'vmware'
self.facts['virtualization_role'] = 'guest'
if 'VirtualBox' in line:
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
# Detect domaining on Sparc hardware
if os.path.exists("/usr/sbin/virtinfo"):
# The output of
#!/usr/bin/python3
from pkg_resources import parse_version
import json
import sys
import importlib
import traceback
import configparser
import os
import os.path
from websocket import create_connection
from queue import Queue
from sseclient import SSEClient
import appdaemon.conf as conf
import time
import datetime
import signal
import uuid
import astral
import pytz
import math
import appdaemon.rundash as appdash
import asyncio
import yaml
import concurrent
import threading
import appdaemon.utils as utils
import appdaemon.appapi as appapi
q = Queue(maxsize=0)
conf.was_dst = None
conf.last_state = None
appapi.reading_messages = False
inits = {}
ws = None
def init_sun():
latitude = conf.latitude
longitude = conf.longitude
if not -90 <= latitude <= 90:
raise ValueError("Latitude needs to be -90 .. 90")
if not -180 <= longitude <= 180:
raise ValueError("Longitude needs to be -180 .. 180")
elevation = conf.elevation
conf.tz = pytz.timezone(conf.time_zone)
conf.location = astral.Location((
'', '', latitude, longitude, conf.tz.zone, elevation
))
def update_sun():
# now = datetime.datetime.now(conf.tz)
now = conf.tz.localize(utils.get_now())
mod = -1
while True:
try:
next_rising_dt = conf.location.sunrise(
(now + datetime.timedelta(days=mod)).date(), local=False
)
if next_rising_dt > now:
break
except astral.AstralError:
pass
mod += 1
mod = -1
while True:
try:
next_setting_dt = conf.location.sunset(
(now + datetime.timedelta(days=mod)).date(), local=False
)
if next_setting_dt > now:
break
except astral.AstralError:
pass
mod += 1
old_next_rising_dt = conf.sun.get("next_rising")
old_next_setting_dt = conf.sun.get("next_setting")
conf.sun["next_rising"] = next_rising_dt
conf.sun["next_setting"] = next_setting_dt
if old_next_rising_dt is not None and old_next_rising_dt != conf.sun["next_rising"]:
#dump_schedule()
process_sun("next_rising")
#dump_schedule()
if old_next_setting_dt is not None and old_next_setting_dt != conf.sun["next_setting"]:
#utils.log(conf.logger, "INFO", "Old next setting: {} New next setting: {}".format(old_next_setting_dt, conf.sun["next_setting"]))
#dump_schedule()
process_sun("next_setting")
#dump_schedule()
def is_dst():
return bool(time.localtime(utils.get_now_ts()).tm_isdst)
def stopit():
global ws
conf.stopping = True
if ws is not None:
ws.close()
conf.appq.put_nowait({"event_type": "ha_stop", "data": None})
# noinspection PyUnusedLocal
def handle_sig(signum, frame):
if signum == signal.SIGUSR1:
dump_schedule()
dump_callbacks()
dump_objects()
dump_queue()
dump_sun()
if signum == signal.SIGHUP:
read_apps(True)
if signum == signal.SIGINT:
utils.log(conf.logger, "INFO", "Keyboard interrupt")
stopit()
def dump_sun():
utils.log(conf.logger, "INFO", "--------------------------------------------------")
utils.log(conf.logger, "INFO", "Sun")
utils.log(conf.logger, "INFO", "--------------------------------------------------")
utils.log(conf.logger, "INFO", conf.sun)
utils.log(conf.logger, "INFO", "--------------------------------------------------")
def dump_schedule():
if conf.schedule == {}:
utils.log(conf.logger, "INFO", "Schedule is empty")
else:
utils.log(conf.logger, "INFO", "--------------------------------------------------")
utils.log(conf.logger, "INFO", "Scheduler Table")
utils.log(conf.logger, "INFO", "--------------------------------------------------")
for name in conf.schedule.keys():
utils.log(conf.logger, "INFO", "{}:".format(name))
for entry in sorted(
conf.schedule[name].keys(),
key=lambda uuid_: conf.schedule[name][uuid_]["timestamp"]
):
utils.log(
conf.logger, "INFO",
" Timestamp: {} - data: {}".format(
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(
conf.schedule[name][entry]["timestamp"]
)),
conf.schedule[name][entry]
)
)
utils.log(conf.logger, "INFO", "--------------------------------------------------")
def dump_callbacks():
if conf.callbacks == {}:
utils.log(conf.logger, "INFO", "No callbacks")
else:
utils.log(conf.logger, "INFO", "--------------------------------------------------")
utils.log(conf.logger, "INFO", "Callbacks")
utils.log(conf.logger, "INFO", "--------------------------------------------------")
for name in conf.callbacks.keys():
utils.log(conf.logger, "INFO", "{}:".format(name))
for uuid_ in conf.callbacks[name]:
utils.log(conf.logger, "INFO", " {} = {}".format(uuid_, conf.callbacks[name][uuid_]))
utils.log(conf.logger, "INFO", "--------------------------------------------------")
def dump_objects():
utils.log(conf.logger, "INFO", "--------------------------------------------------")
utils.log(conf.logger, "INFO", "Objects")
utils.log(conf.logger, "INFO", "--------------------------------------------------")
for object_ in conf.objects.keys():
utils.log(conf.logger, "INFO", "{}: {}".format(object_, conf.objects[object_]))
utils.log(conf.logger, "INFO", "--------------------------------------------------")
def dump_queue():
utils.log(conf.logger, "INFO", "--------------------------------------------------")
utils.log(conf.logger, "INFO", "Current Queue Size is {}".format(q.qsize()))
utils.log(conf.logger, "INFO", "--------------------------------------------------")
def check_constraint(key, value):
unconstrained = True
with conf.ha_state_lock:
if key == "constrain_input_boolean":
values = value.split(",")
if len(values) == 2:
entity = values[0]
state = values[1]
else:
entity = value
state = "on"
if entity in conf.ha_state and conf.ha_state[entity]["state"] != state:
unconstrained = False
if key == "constrain_input_select":
values = value.split(",")
entity = values.pop(0)
if entity in conf.ha_state and conf.ha_state[entity]["state"] not in values:
unconstrained = False
if key == "constrain_presence":
if value == "everyone" and not utils.everyone_home():
unconstrained = False
elif value == "anyone" and not utils.anyone_home():
unconstrained = False
elif value == "noone" and not utils.noone_home():
unconstrained = False
if key == "constrain_days":
if today_is_constrained(value):
unconstrained = False
return unconstrained
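# Small illustration (hypothetical entity and states, not from the source):
# check_constraint("constrain_input_boolean", "input_boolean.vacation,off")
# returns False while input_boolean.vacation reports state "on", so
# dispatch_worker() below will not queue the callback.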
def check_time_constraint(args, name):
unconstrained = True
if "constrain_start_time" in args or "constrain_end_time" in args:
if "constrain_start_time" not in args:
start_time = "00:00:00"
else:
start_time = args["constrain_start_time"]
if "constrain_end_time" not in args:
end_time = "23:59:59"
else:
end_time = args["constrain_end_time"]
if not utils.now_is_between(start_time, end_time, name):
unconstrained = False
return unconstrained
def dispatch_worker(name, args):
unconstrained = True
#
# Argument Constraints
#
for arg in conf.app_config[name].keys():
if not check_constraint(arg, conf.app_config[name][arg]):
unconstrained = False
if not check_time_constraint(conf.app_config[name], name):
unconstrained = False
#
# Callback level constraints
#
if "kwargs" in args:
for arg in args["kwargs"].keys():
if not check_constraint(arg, args["kwargs"][arg]):
unconstrained = False
if not check_time_constraint(args["kwargs"], name):
unconstrained = False
if unconstrained:
q.put_nowait(args)
def today_is_constrained(days):
day = utils.get_now().weekday()
daylist = [utils.day_of_week(d) for d in days.split(",")]
if day in daylist:
return False
return True
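# For example (hypothetical config value): with constrain_days="mon,tue" the
# callback runs only on Mondays and Tuesdays; on any other weekday this
# function returns True and dispatch_worker() drops the dispatch.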
def process_sun(action):
utils.log(
conf.logger, "DEBUG",
"Process sun: {}, next sunrise: {}, next sunset: {}".format(
action, conf.sun["next_rising"], conf.sun["next_setting"]
)
)
with conf.schedule_lock:
for name in conf.schedule.keys():
for entry in sorted(
conf.schedule[name].keys(),
key=lambda uuid_: conf.schedule[name][uuid_]["timestamp"]
):
schedule = conf.schedule[name][entry]
if schedule["type"] == action and "inactive" in schedule:
del schedule["inactive"]
c_offset = utils.get_offset(schedule)
schedule["timestamp"] = utils.calc_sun(action) + c_offset
schedule["offset"] = c_offset
# noinspection PyBroadException
def exec_schedule(name, entry, args):
try:
# Locking performed in calling function
if "inactive" in args:
return
# Call function
if "entity" in args["kwargs"]:
dispatch_worker(name, {
"name": name,
"id": conf.objects[name]["id"],
"type": "attr",
"function": args["callback"],
"attribute": args["kwargs"]["attribute"],
"entity": args["kwargs"]["entity"],
"new_state": args["kwargs"]["new_state"],
"old_state": args["kwargs"]["old_state"],
"kwargs": args["kwargs"],
})
else:
dispatch_worker(name, {
"name": name,
"id": conf.objects[name]["id"],
"type": "timer",
"function": args["callback"],
"kwargs": args["kwargs"],
})
# If it is a repeating entry, rewrite with new timestamp
if args["repeat"]:
if args["type"] == "next_rising" or args["type"] == "next_setting":
# Its sunrise or sunset - if the offset is negative we
# won't know the next rise or set time yet so mark as inactive
# So we can adjust with a scan at sun rise/set
if args["offset"] < 0:
args["inactive"] = 1
else:
# We have a valid time for the next sunrise/set so use it
c_offset = utils.get_offset(args)
args["timestamp"] = utils.calc_sun(args["type"]) + c_offset
args["offset"] = c_offset
else:
# Not sunrise or sunset so just increment
# the timestamp with the repeat interval
args["basetime"] += args["interval"]
args["timestamp"] = args["basetime"] + utils.get_offset(args)
else: # Otherwise just delete
del conf.schedule[name][entry]
except:
utils.log(conf.error, "WARNING", '-' * 60)
utils.log(
conf.error, "WARNING",
"Unexpected error during exec_schedule() for App: {}".format(name)
)
utils.log(conf.error, "WARNING", "Args: {}".format(args))
utils.log(conf.error, "WARNING", '-' * 60)
utils.log(conf.error, "WARNING", traceback.format_exc())
utils.log(conf.error, "WARNING", '-' * 60)
if conf.errorfile != "STDERR" and conf.logfile != "STDOUT":
# When explicitly logging to stdout and stderr, suppress
# log messages about writing an error (since they show up anyway)
utils.log(conf.logger, "WARNING", "Logged an error to {}".format(conf.errorfile))
utils.log(conf.error, "WARNING", "Scheduler entry has been deleted")
utils.log(conf.error, "WARNING", '-' * 60)
del conf.schedule[name][entry]
@asyncio.coroutine
def do_every(period, f):
t = math.floor(utils.get_now_ts())
count = 0
t_ = math.floor(time.time())
while not conf.stopping:
count += 1
delay = max(t_ + count * period - time.time(), 0)
yield from asyncio.sleep(delay)
t += conf.interval
r = yield from f(t)
if r is not None and r != t:
#print("r: {}, t: {}".format(r,t))
t = r
t_ = r
count = 0
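# Note on the loop above: each delay is computed from the original baseline
# (t_ + count * period), so a slow callback does not accumulate drift. For
# example, with period=1 and a handler that takes 0.3s, the next tick still
# fires on the following whole-second boundary relative to t_.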
# noinspection PyBroadException,PyBroadException
def do_every_second(utc):
try:
start_time = datetime.datetime.now().timestamp()
now = datetime.datetime.fromtimestamp(utc)
conf.now = utc
# If we have reached endtime bail out
if conf.endtime is not None and utils.get_now() >= conf.endtime:
utils.log(conf.logger, "INFO", "End time reached, exiting")
stopit()
if conf.realtime:
real_now = datetime.datetime.now().timestamp()
delta = abs(utc - real_now)
if delta > 1:
utils.log(conf.logger, "WARNING", "Scheduler clock skew detected - delta = {} - resetting".format(delta))
return real_now
# Update sunrise/sunset etc.
update_sun()
# Check if we have entered or exited DST - if so, reload apps
# to ensure all time callbacks are recalculated
now_dst = is_dst()
if now_dst != conf.was_dst:
utils.log(
conf.logger, "INFO",
"Detected change in DST from {} to {} -"
" reloading all modules".format(conf.was_dst, now_dst)
)
# dump_schedule()
utils.log(conf.logger, "INFO", "-" * 40)
yield from utils.run_in_executor(conf.loop, conf.executor, read_apps, True)
# dump_schedule()
conf.was_dst = now_dst
# dump_schedule()
# test code for clock skew
#if random.randint(1, 10) == 5:
# time.sleep(random.randint(1,20))
# Check to see if any apps have changed but only if we have valid state
if conf.last_state is not None and appapi.reading_messages:
yield from utils.run_in_executor(conf.loop, conf.executor, read_apps)
# Check to see if config has changed
if appapi.reading_messages:
yield from utils.run_in_executor(conf.loop, conf.executor, check_config)
# Call me suspicious, but let's update state from HA periodically
# in case we miss events for whatever reason
# Every 10 minutes seems like a good place to start
if conf.last_state is not None and appapi.reading_messages and now - conf.last_state > datetime.timedelta(minutes=10) and conf.ha_url is not None:
try:
yield from utils.run_in_executor(conf.loop, conf.executor, get_ha_state)
conf.last_state = now
except:
utils.log(conf.logger, "WARNING", "Unexpected error refreshing HA state | |
from ._glib import *
# gobject - C library
libgobject = CDLL(find_library('gobject-2.0'))
#
# GType
#
class GType(gsize): pass
class GTypeInterface(Structure):
_fields_ = [
('g_type', GType),
('g_instance_type', GType),
]
class GTypeClass(Structure):
_fields_ = [
('g_type', GType),
]
class GTypeInstance(Structure):
_fields_ = [
('g_class', POINTER(GTypeClass)),
]
# class GTypeInfo(Structure): pass
# class GTypeFundamentalInfo(Structure): pass
# class GInterfaceInfo(Structure): pass
# class GTypeValueTable(Structure): pass
GTypeDebugFlags = gint
G_TYPE_DEBUG_NONE = GTypeDebugFlags(0)
G_TYPE_DEBUG_OBJECTS = GTypeDebugFlags(1 << 0)
G_TYPE_DEBUG_SIGNALS = GTypeDebugFlags(1 << 1)
G_TYPE_DEBUG_MASK = GTypeDebugFlags(0x03)
# class GTypeQuery(Structure):
# _fields_ = [
# ('type', GType),
# ('type_name', gchar_p),
# ('class_size', guint),
# ('instance_size', guint),
# ]
GTypeFlags = gint
G_TYPE_FLAG_ABSTRACT = GTypeFlags(1 << 4)
G_TYPE_FLAG_VALUE_ABSTRACT = GTypeFlags(1 << 5)
# GTypeFundamentalFlags = gint
# G_TYPE_FLAG_CLASSED = GTypeFundamentalFlags(1 << 0)
# G_TYPE_FLAG_INSTANTIATABLE = GTypeFundamentalFlags(1 << 1)
# G_TYPE_FLAG_DERIVABLE = GTypeFundamentalFlags(1 << 2)
# G_TYPE_FLAG_DEEP_DERIVABLE = GTypeFundamentalFlags(1 << 3)
#
# GTypePlugin
#
# class GTypePlugin(Structure): pass
# class GTypePluginClass(Structure): pass
#
# GTypeModule
#
# class GTypeModule(Structure):
# _fields_ = [
# ('name', gchar_p),
# ]
# class GTypeModuleClass(Structure): pass
#
# GObject
#
class GObject(Structure):
_fields_ = [
('g_type_instance', GTypeInstance),
('ref_count', guint),
('qdata', POINTER(GData)),
]
class GObjectClass(Structure):
_fields_ = [
('g_type_class', GTypeClass),
]
# class GObjectConstructParam(Structure): pass
# class GInitiallyUnowned(GObject): pass
# class GInitiallyUnownedClass(GObjectClass): pass
#
# GEnum/GFlags
#
# class GEnumClass(Structure): pass
# class GFlagsClass(Structure): pass
# class GEnumValue(Structure): pass
# class GFlagsValue(Structure): pass
#
# GBoxed
#
# GStrv = POINTER(gchar_p)
#
# GValue
#
class GValue_union0(Union):
_fields_ = [
('v_int', gint),
('v_uint', guint),
('v_long', glong),
('v_ulong', gulong),
('v_int64', gint64),
('v_uint64', guint64),
('v_float', gfloat),
('v_double', gdouble),
('v_pointer', gpointer),
]
class GValue(Structure):
_fields_ = [
('g_type', GType),
('data', GValue_union0 * 2),
]
class GParameter(Structure):
_fields_ = [
('name', gchar_p),
('value', GValue),
]
#
# GParamSpec/GValue
class GParamSpecBoolean(Structure): pass
class GParamSpecChar(Structure): pass
class GParamSpecUChar(Structure): pass
class GParamSpecInt(Structure): pass
class GParamSpecUInt(Structure): pass
class GParamSpecLong(Structure): pass
class GParamSpecULong(Structure): pass
class GParamSpecInt64(Structure): pass
class GParamSpecUInt64(Structure): pass
class GParamSpecFloat(Structure): pass
class GParamSpecDouble(Structure): pass
class GParamSpecEnum(Structure): pass
class GParamSpecFlags(Structure): pass
class GParamSpecString(Structure): pass
gchararray = POINTER(gchar_p)
class GParamSpecParam(Structure): pass
class GParamSpecBoxed(Structure): pass
class GParamSpecPointer(Structure): pass
class GParamSpecObject(Structure): pass
class GParamSpecUnichar(Structure): pass
class GParamSpecValueArray(Structure): pass
class GParamSpecOverride(Structure): pass
class GParamSpecGType(Structure): pass
class GParamSpecVariant(Structure): pass
#
# GParamSpec
#
class GParamSpec(Structure): pass
# class GParamSpecClass(Structure): pass
GParamFlags = gint
G_PARAM_READABLE = GParamFlags(1 << 0)
G_PARAM_WRITABLE = GParamFlags(1 << 1)
G_PARAM_CONSTRUCT = GParamFlags(1 << 2)
G_PARAM_CONSTRUCT_ONLY = GParamFlags(1 << 3)
G_PARAM_LAX_VALIDATION = GParamFlags(1 << 4)
G_PARAM_STATIC_NAME = GParamFlags(1 << 5)
G_PARAM_PRIVATE = G_PARAM_STATIC_NAME
G_PARAM_STATIC_NICK = GParamFlags(1 << 6)
G_PARAM_STATIC_BLURB = GParamFlags(1 << 7)
G_PARAM_DEPRECATED = GParamFlags(1 << 31)
class GParamSpecTypeInfo(Structure): pass
class GParamSpecPool(Structure): pass
#
# GTypeCValue
#
# class GTypeCValue(Union):
# _fields_ = [
# ('v_int', gint),
# ('v_long', glong),
# ('v_int64', gint64),
# ('v_double', gdouble),
# ('v_pointer', gpointer),
# ]
#
# GSignal
#
# class GSignalInvocationHint(Structure): pass
# class GSignalCMarshaller(Structure): pass
GSignalFlags = gint
G_SIGNAL_RUN_FIRST = GSignalFlags(1 << 0)
G_SIGNAL_RUN_LAST = GSignalFlags(1 << 1)
G_SIGNAL_RUN_CLEANUP = GSignalFlags(1 << 2)
G_SIGNAL_NO_RECURSE = GSignalFlags(1 << 3)
G_SIGNAL_DETAILED = GSignalFlags(1 << 4)
G_SIGNAL_ACTION = GSignalFlags(1 << 5)
G_SIGNAL_NO_HOOKS = GSignalFlags(1 << 6)
# GSignalMatchType = gint
# G_SIGNAL_MATCH_ID = GSignalMatchType(1 << 0)
# G_SIGNAL_MATCH_DETAIL = GSignalMatchType(1 << 1)
# G_SIGNAL_MATCH_CLOSURE = GSignalMatchType(1 << 2)
# G_SIGNAL_MATCH_FUNC = GSignalMatchType(1 << 3)
# G_SIGNAL_MATCH_DATA = GSignalMatchType(1 << 4)
# G_SIGNAL_MATCH_UNBLOCKED = GSignalMatchType(1 << 5)
# class GSignalQuery(Structure): pass
GConnectFlags = gint
G_CONNECT_AFTER = GConnectFlags(1 << 0)
G_CONNECT_SWAPPED = GConnectFlags(1 << 1)
#
# GClosure
#
class GClosure(Structure):
_fields_ = [
('refcount_metamarshal_nguards_nfnotifiers_ninotifiers_ininotify_floating_derivativeflag_inmarshal_isinvalid', guint),
('marshal', gpointer),
('data', gpointer),
('notifiers', gpointer),
]
# class GCClosure(Structure):
# _fields_ = [
# ('closure', GClosure),
# ('callback', gpointer),
# ]
#
# GValueArray
#
# class GValueArray(Structure):
# _fields_ = [
# ('n_values', guint),
# ('values', POINTER(GValue)),
# ]
#
# GBinding
#
# class GBinding(Structure): pass
# GBindingFlags = gint
# G_BINDING_DEFAULT = GBindingFlags(0)
# G_BINDING_BIDIRECTIONAL = GBindingFlags(1 << 0)
# G_BINDING_SYNC_CREATE = GBindingFlags(1 << 1)
# G_BINDING_INVERT_BOOLEAN = GBindingFlags(1 << 2)
#
# GType
#
g_type_init = ctypes_get_func(
libgobject,
'g_type_init',
None,
)
#
# SPECIAL: necessary for rest of function calls
#
g_type_init()
g_type_init_with_debug_flags = ctypes_get_func(
libgobject,
'g_type_init_with_debug_flags',
None,
GTypeDebugFlags,
)
g_type_name = ctypes_get_func(
libgobject,
'g_type_name',
gchar_p,
GType,
)
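# Minimal sanity check, kept as a comment (illustrative sketch; G_TYPE_OBJECT
# is the fundamental GObject type id, 20 << 2, and is not defined elsewhere in
# this module):
# G_TYPE_OBJECT = GType(20 << 2)
# assert g_type_name(G_TYPE_OBJECT) == b'GObject'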
# g_type_qname = ctypes_get_func(
# libgobject,
# 'g_type_qname',
# GQuark,
# GType,
# )
# g_type_from_name = ctypes_get_func(
# libgobject,
# 'g_type_from_name',
# GType,
# gchar_p,
# )
# g_type_parent = ctypes_get_func(
# libgobject,
# 'g_type_parent',
# GType,
# GType,
# )
# g_type_depth = ctypes_get_func(
# libgobject,
# 'g_type_depth',
# guint,
# GType,
# )
# g_type_next_base = ctypes_get_func(
# libgobject,
# 'g_type_next_base',
# GType,
# GType,
# GType,
# )
# g_type_is_a = ctypes_get_func(
# libgobject,
# 'g_type_is_a',
# gboolean,
# GType,
# GType,
# )
# g_type_class_ref = ctypes_get_func(
# libgobject,
# 'g_type_class_ref',
# gpointer,
# GType,
# )
# g_type_class_peek = ctypes_get_func(
# libgobject,
# 'g_type_class_peek',
# gpointer,
# GType,
# )
# g_type_class_peek_static = ctypes_get_func(
# libgobject,
# 'g_type_class_peek_static',
# gpointer,
# GType,
# )
# g_type_class_unref = ctypes_get_func(
# libgobject,
# 'g_type_class_unref',
# None,
# gpointer,
# )
# g_type_class_peek_parent = ctypes_get_func(
# libgobject,
# 'g_type_class_peek_parent',
# gpointer,
# gpointer,
# )
# g_type_class_add_private = ctypes_get_func(
# libgobject,
# 'g_type_class_add_private',
# None,
# gpointer,
# gsize,
# )
# g_type_add_class_private = ctypes_get_func(
# libgobject,
# 'g_type_add_class_private',
# None,
# GType,
# gsize,
# )
# g_type_interface_peek = ctypes_get_func(
# libgobject,
# 'g_type_interface_peek',
# gpointer,
# gpointer,
# GType,
# )
# g_type_interface_peek_parent = ctypes_get_func(
# libgobject,
# 'g_type_interface_peek_parent',
# gpointer,
# gpointer,
# )
# g_type_default_interface_ref = ctypes_get_func(
# libgobject,
# 'g_type_default_interface_ref',
# gpointer,
# GType,
# )
# g_type_default_interface_peek = ctypes_get_func(
# libgobject,
# 'g_type_default_interface_peek',
# gpointer,
# GType,
# )
# g_type_default_interface_unref = ctypes_get_func(
# libgobject,
# 'g_type_default_interface_unref',
# None,
# gpointer,
# )
# g_type_children = ctypes_get_func(
# libgobject,
# 'g_type_children',
# POINTER(GType),
# GType,
# POINTER(guint),
# )
# g_type_interfaces = ctypes_get_func(
# libgobject,
# 'g_type_interfaces',
# POINTER(GType),
# GType,
# POINTER(guint),
# )
# g_type_interface_prerequisites = ctypes_get_func(
# libgobject,
# 'g_type_interface_prerequisites',
# POINTER(GType),
# GType,
# POINTER(guint),
# )
# g_type_set_qdata = ctypes_get_func(
# libgobject,
# 'g_type_set_qdata',
# None,
# GType,
# GQuark,
# gpointer,
# )
# g_type_get_qdata = ctypes_get_func(
# libgobject,
# 'g_type_get_qdata',
# gpointer,
# GType,
# GQuark,
# )
# g_type_query = ctypes_get_func(
# libgobject,
# 'g_type_query',
# None,
# GType,
# POINTER(GTypeQuery),
# )
# GBaseInitFunc = CFUNCTYPE(None, gpointer)
# GBaseFinalizeFunc = CFUNCTYPE(None, gpointer)
# GClassInitFunc = CFUNCTYPE(None, gpointer, gpointer)
# GClassFinalizeFunc = CFUNCTYPE(None, gpointer, gpointer)
# GInstanceInitFunc = CFUNCTYPE(None, POINTER(GTypeInstance), gpointer)
# GInterfaceInitFunc = CFUNCTYPE(None, gpointer, gpointer)
# GInterfaceFinalizeFunc = CFUNCTYPE(None, gpointer, gpointer)
# GTypeClassCacheFunc = CFUNCTYPE(gboolean, gpointer, POINTER(GTypeClass))
# g_type_register_static = ctypes_get_func(
# libgobject,
# 'g_type_register_static',
# GType,
# GType,
# gchar_p,
# POINTER(GTypeInfo),
# GTypeFlags,
# )
# g_type_register_static_simple = ctypes_get_func(
# libgobject,
# 'g_type_register_static_simple',
# GType,
# GType,
# gchar_p,
# guint,
# GClassInitFunc,
# guint,
# GInstanceInitFunc,
# GTypeFlags,
# )
# g_type_register_dynamic = ctypes_get_func(
# libgobject,
# 'g_type_register_dynamic',
# GType,
# GType,
# gchar_p,
# POINTER(GTypePlugin),
# GTypeFlags,
# )
# g_type_register_fundamental = ctypes_get_func(
# libgobject,
# 'g_type_register_fundamental',
# GType,
# GType,
# gchar_p,
# POINTER(GTypeInfo),
# POINTER(GTypeFundamentalInfo),
# GTypeFlags,
# )
# g_type_add_interface_static = ctypes_get_func(
# libgobject,
# 'g_type_add_interface_static',
# None,
# GType,
# GType,
# POINTER(GInterfaceInfo),
# )
# g_type_add_interface_dynamic = ctypes_get_func(
# libgobject,
# 'g_type_add_interface_dynamic',
# None,
# GType,
# GType,
# POINTER(GTypePlugin),
# )
# g_type_interface_add_prerequisite = ctypes_get_func(
# libgobject,
# 'g_type_interface_add_prerequisite',
# None,
# GType,
# GType,
# )
# g_type_get_plugin = ctypes_get_func(
# libgobject,
# 'g_type_get_plugin',
# POINTER(GTypePlugin),
# GType,
# )
# g_type_interface_get_plugin = ctypes_get_func(
# libgobject,
# 'g_type_interface_get_plugin',
# POINTER(GTypePlugin),
# GType,
# GType,
# )
# g_type_fundamental_next = ctypes_get_func(
# libgobject,
# 'g_type_fundamental_next',
# GType,
# )
g_type_fundamental = ctypes_get_func(
libgobject,
'g_type_fundamental',
GType,
GType,
)
g_type_create_instance = ctypes_get_func(
libgobject,
'g_type_create_instance',
POINTER(GTypeInstance),
GType,
)
g_type_free_instance = ctypes_get_func(
libgobject,
'g_type_free_instance',
None,
POINTER(GTypeInstance),
)
# g_type_add_class_cache_func = ctypes_get_func(
# libgobject,
# 'g_type_add_class_cache_func',
# None,
# gpointer,
# GTypeClassCacheFunc,
# )
# g_type_remove_class_cache_func = ctypes_get_func(
# libgobject,
# 'g_type_remove_class_cache_func',
# None,
# gpointer,
# GTypeClassCacheFunc,
# )
# g_type_class_unref_uncached = ctypes_get_func(
# libgobject,
# 'g_type_class_unref_uncached',
# None,
# gpointer,
# )
# GTypeInterfaceCheckFunc = CFUNCTYPE(None, gpointer, gpointer)
# g_type_add_interface_check = ctypes_get_func(
# libgobject,
# 'g_type_add_interface_check',
# None,
# gpointer,
# GTypeInterfaceCheckFunc,
# )
# g_type_remove_interface_check = ctypes_get_func(
# libgobject,
# 'g_type_remove_interface_check',
# None,
# gpointer,
# GTypeInterfaceCheckFunc,
# )
# g_type_value_table_peek = ctypes_get_func(
# libgobject,
# 'g_type_value_table_peek',
# POINTER(GTypeValueTable),
# GType,
# )
#
# GTypePlugin
#
# GTypePluginUse = CFUNCTYPE(None, POINTER(GTypePlugin))
# GTypePluginUnuse = CFUNCTYPE(None, POINTER(GTypePlugin))
# GTypePluginCompleteTypeInfo = CFUNCTYPE(None, POINTER(GTypePlugin), GType, POINTER(GTypeInfo), POINTER(GTypeValueTable))
# GTypePluginCompleteInterfaceInfo = CFUNCTYPE(None, POINTER(GTypePlugin), GType, GType, POINTER(GInterfaceInfo))
# g_type_plugin_use = ctypes_get_func(
# libgobject,
# 'g_type_plugin_use',
# None,
# POINTER(GTypePlugin),
# )
# g_type_plugin_unuse = ctypes_get_func(
# libgobject,
# 'g_type_plugin_unuse',
# None,
# POINTER(GTypePlugin),
# )
# g_type_plugin_complete_type_info = ctypes_get_func(
# libgobject,
# 'g_type_plugin_complete_type_info',
# None,
# POINTER(GTypePlugin),
# GType,
# POINTER(GTypeInfo),
# POINTER(GTypeValueTable),
# )
# g_type_plugin_complete_interface_info = ctypes_get_func(
# libgobject,
# 'g_type_plugin_complete_interface_info',
# None,
# POINTER(GTypePlugin),
# GType,
# GType,
# POINTER(GInterfaceInfo),
# )
#
# GTypeModule
#
# g_type_module_use = ctypes_get_func(
# libgobject,
# 'g_type_module_use',
# gboolean,
# POINTER(GTypeModule),
# )
# g_type_module_unuse = ctypes_get_func(
# libgobject,
# 'g_type_module_unuse',
# None,
# POINTER(GTypeModule),
# )
# g_type_module_set_name = ctypes_get_func(
# libgobject,
# 'g_type_module_set_name',
# None,
# POINTER(GTypeModule),
# gchar_p,
# )
# g_type_module_register_type = ctypes_get_func(
# libgobject,
# 'g_type_module_register_type',
# GType,
# POINTER(GTypeModule),
# GType,
# gchar_p,
# POINTER(GTypeInfo),
# GTypeFlags,
# )
# g_type_module_add_interface = ctypes_get_func(
# libgobject,
# 'g_type_module_add_interface',
# None,
# POINTER(GTypeModule),
# GType,
# GType,
# POINTER(GInterfaceInfo),
# )
# g_type_module_register_enum = ctypes_get_func(
# libgobject,
# 'g_type_module_register_enum',
# GType,
# POINTER(GTypeModule),
# gchar_p,
# POINTER(GEnumValue),
# )
# g_type_module_register_flags = ctypes_get_func(
# libgobject,
# 'g_type_module_register_flags',
# GType,
# POINTER(GTypeModule),
# gchar_p,
# POINTER(GFlagsValue),
# )
#
# GObject
#
GObjectGetPropertyFunc = CFUNCTYPE(None, POINTER(GObject), guint, POINTER(GValue), POINTER(GParamSpec))
GObjectSetPropertyFunc = CFUNCTYPE(None, POINTER(GObject), guint, POINTER(GValue), POINTER(GParamSpec))
GObjectFinalizeFunc = CFUNCTYPE(None, POINTER(GObject))
g_object_class_install_property = ctypes_get_func(
libgobject,
'g_object_class_install_property',
None,
POINTER(GObjectClass),
guint,
POINTER(GParamSpec),
)
g_object_class_install_properties = ctypes_get_func(
libgobject,
'g_object_class_install_properties',
None,
POINTER(GObjectClass),
guint,
POINTER(POINTER(GParamSpec)),  # pspecs is an array of GParamSpec pointers
)
g_object_class_find_property = ctypes_get_func(
libgobject,
'g_object_class_find_property',
POINTER(GParamSpec),
POINTER(GObjectClass),
gchar_p,
)
g_object_class_list_properties = ctypes_get_func(
libgobject,
'g_object_class_list_properties',
POINTER(POINTER(GParamSpec)),
POINTER(GObjectClass),
POINTER(guint),
)
g_object_class_override_property = ctypes_get_func(
libgobject,
'g_object_class_override_property',
None,
POINTER(GObjectClass),
guint,
gchar_p,
)
g_object_interface_install_property = ctypes_get_func(
libgobject,
'g_object_interface_install_property',
None,
gpointer,
POINTER(GParamSpec),
)
g_object_interface_find_property = ctypes_get_func(
libgobject,
'g_object_interface_find_property',
POINTER(GParamSpec),
gpointer,
gchar_p,
)
g_object_interface_list_properties = ctypes_get_func(
libgobject,
'g_object_interface_list_properties',
POINTER(POINTER(GParamSpec)),
gpointer,
POINTER(guint),
)
g_object_new = ctypes_get_func(
libgobject,
'g_object_new',
gpointer,
#GType,
#gchar_p,
#...,
)
g_object_newv = ctypes_get_func(
libgobject,
'g_object_newv',
gpointer,
GType,
guint,
POINTER(GParameter),
)
g_object_ref = ctypes_get_func(
libgobject,
'g_object_ref',
gpointer,
gpointer,
)
g_object_unref = ctypes_get_func(
libgobject,
'g_object_unref',
None,
gpointer,
)
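# Illustrative reference-count round trip using the bindings above (a sketch
# only, kept as a comment because it allocates a live GObject when run;
# GType(20 << 2) is the fundamental GObject type id):
# obj = g_object_newv(GType(20 << 2), 0, None)
# g_object_ref(obj)      # refcount 2
# g_object_unref(obj)    # refcount 1
# g_object_unref(obj)    # refcount 0, instance finalized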
g_object_ref_sink = ctypes_get_func(
libgobject,
'g_object_ref_sink',
gpointer,
gpointer,
)
g_object_is_floating = ctypes_get_func(
libgobject,
'g_object_is_floating',
gboolean,
gpointer,
)
g_object_force_floating = ctypes_get_func(
libgobject,
'g_object_force_floating',
None,
POINTER(GObject),
)
GWeakNotify = CFUNCTYPE(None, gpointer, POINTER(GObject))
g_object_weak_ref = ctypes_get_func(
libgobject,
'g_object_weak_ref',
None,
POINTER(GObject),
GWeakNotify,
gpointer,
)
g_object_weak_unref = ctypes_get_func(
libgobject,
'g_object_weak_unref',
None,
POINTER(GObject),
GWeakNotify,
gpointer,
)
g_object_add_weak_pointer = ctypes_get_func(
libgobject,
'g_object_add_weak_pointer',
None,
POINTER(GObject),
POINTER(gpointer),
)
g_object_remove_weak_pointer = ctypes_get_func(
libgobject,
'g_object_remove_weak_pointer',
None,
POINTER(GObject),
POINTER(gpointer),
)
GToggleNotify = CFUNCTYPE(None, gpointer, POINTER(GObject), gboolean)
g_object_add_toggle_ref = ctypes_get_func(
libgobject,
'g_object_add_toggle_ref',
None,
POINTER(GObject),
GToggleNotify,
gpointer,
)
g_object_remove_toggle_ref = ctypes_get_func(
libgobject,
'g_object_remove_toggle_ref',
None,
POINTER(GObject),
GToggleNotify,
gpointer,
)
g_object_connect = ctypes_get_func(
libgobject,
'g_object_connect',
gpointer,
#gpointer,
#gchar_p,
#...,
)
g_object_disconnect = ctypes_get_func(
libgobject,
'g_object_disconnect',
None,
#gpointer,
#gchar_p,
#...,
)
g_object_set = ctypes_get_func(
libgobject,
'g_object_set',
None,
#gpointer,
#gchar_p,
#...,
)
g_object_get = ctypes_get_func(
libgobject,
'g_object_get',
None,
#gpointer,
#gchar_p,
#...,
)
g_object_notify = ctypes_get_func(
libgobject,
'g_object_notify',
None,
POINTER(GObject),
gchar_p,
)
g_object_notify_by_pspec = ctypes_get_func(
libgobject,
'g_object_notify_by_pspec',
None,
POINTER(GObject),
POINTER(GParamSpec),
)
g_object_freeze_notify = ctypes_get_func(
libgobject,
'g_object_freeze_notify',
None,
POINTER(GObject),
)
g_object_thaw_notify = ctypes_get_func(
libgobject,
'g_object_thaw_notify',
None,
POINTER(GObject),
)
g_object_get_data = ctypes_get_func(
libgobject,
'g_object_get_data',
gpointer,
POINTER(GObject),
gchar_p,
)
g_object_set_data = ctypes_get_func(
libgobject,
'g_object_set_data',
None,
POINTER(GObject),
gchar_p,
gpointer,
)
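# Illustrative sketch (assumption, not part of the original bindings): round-
# tripping a value through the per-object data accessors above. `obj` is
# assumed to be a valid GObject instance pointer; keys must be bytes because
# gchar_p maps to c_char_p, and the payload buffer must outlive its use.
def _example_object_data_roundtrip(obj):
    import ctypes

    obj = ctypes.cast(obj, POINTER(GObject))  # accept a plain gpointer too
    payload = ctypes.create_string_buffer(b'example-payload')
    # Attach the buffer under a string key ...
    g_object_set_data(obj, b'example-key', ctypes.cast(payload, gpointer))
    # ... and read it back as bytes.
    raw = g_object_get_data(obj, b'example-key')
    return ctypes.cast(raw, ctypes.c_char_p).value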
g_object_set_data_full = ctypes_get_func(
libgobject,
'g_object_set_data_full',
None,
POINTER(GObject),
gchar_p,
gpointer,
GDestroyNotify,
)
g_object_steal_data = ctypes_get_func(
libgobject,
'g_object_steal_data',
gpointer,
POINTER(GObject),
gchar_p,
)
g_object_get_qdata = ctypes_get_func(
libgobject,
'g_object_get_qdata',
gpointer,
POINTER(GObject),
GQuark,
)
g_object_set_qdata = ctypes_get_func(
libgobject,
'g_object_set_qdata',
None,
POINTER(GObject),
GQuark,
gpointer,
)
g_object_set_qdata_full = ctypes_get_func(
libgobject,
'g_object_set_qdata_full',
None,
POINTER(GObject),
GQuark,
gpointer,
GDestroyNotify,
)
g_object_steal_qdata = ctypes_get_func(
libgobject,
'g_object_steal_qdata',
gpointer,
POINTER(GObject),
GQuark,
)
g_object_set_property = ctypes_get_func(
libgobject,
'g_object_set_property',
None,
POINTER(GObject),
gchar_p,
POINTER(GValue),
)
g_object_get_property = ctypes_get_func(
libgobject,
'g_object_get_property',
None,
POINTER(GObject),
gchar_p,
POINTER(GValue),
)
g_object_new_valist = ctypes_get_func(
libgobject,
'g_object_new_valist',
POINTER(GObject),
GType,
gchar_p,
gpointer, # va_list
)
g_object_set_valist = ctypes_get_func(
libgobject,
'g_object_set_valist',
None,
POINTER(GObject),
gchar_p,
gpointer, # va_list
)
g_object_get_valist = ctypes_get_func(
libgobject,
'g_object_get_valist',
None,
POINTER(GObject),
gchar_p,
gpointer, # va_list
)
g_object_watch_closure = ctypes_get_func(
libgobject,
'g_object_watch_closure',
None,
POINTER(GObject),
POINTER(GClosure),
)
g_object_run_dispose = ctypes_get_func(
libgobject,
'g_object_run_dispose',
None,
POINTER(GObject),
)
g_initially_unowned_get_type = ctypes_get_func(
libgobject,
'g_initially_unowned_get_type',
GType,
)
#
# GEnum/GFlags
#
# g_enum_get_value = ctypes_get_func(
# libgobject,
# 'g_enum_get_value',
# POINTER(GEnumValue),
# POINTER(GEnumClass),
# gint,
# )
# g_enum_get_value_by_name = ctypes_get_func(
# libgobject,
# 'g_enum_get_value_by_name',
# POINTER(GEnumValue),
# POINTER(GEnumClass),
# gchar_p,
# )
# g_enum_get_value_by_nick = ctypes_get_func(
# libgobject,
# 'g_enum_get_value_by_nick',
# POINTER(GEnumValue),
# POINTER(GEnumClass),
# gchar_p,
# )
# g_flags_get_first_value = ctypes_get_func(
# libgobject,
# 'g_flags_get_first_value',
# POINTER(GFlagsValue),
# POINTER(GFlagsClass),
# guint,
# )
# g_flags_get_value_by_name = ctypes_get_func(
# libgobject,
# 'g_flags_get_value_by_name',
# POINTER(GFlagsValue),
# POINTER(GFlagsClass),
# gchar_p,
# )
# g_flags_get_value_by_nick = ctypes_get_func(
# libgobject,
# 'g_flags_get_value_by_nick',
# POINTER(GFlagsValue),
# POINTER(GFlagsClass),
# gchar_p,
# )
# g_enum_register_static = ctypes_get_func(
# libgobject,
# 'g_enum_register_static',
# GType,
# gchar_p,
# POINTER(GEnumValue),
# )
# g_flags_register_static = ctypes_get_func(
# libgobject,
# 'g_flags_register_static',
# GType,
# gchar_p,
# POINTER(GFlagsValue),
# )
# g_enum_complete_type_info = ctypes_get_func(
# libgobject,
# 'g_enum_complete_type_info',
# None,
# GType,
# POINTER(GTypeInfo),
# POINTER(GEnumValue),
# )
# g_flags_complete_type_info = ctypes_get_func(
# libgobject,
# 'g_flags_complete_type_info',
# None,
# GType,
# POINTER(GTypeInfo),
# POINTER(GFlagsValue),
# )
#
# GBoxed
#
# GBoxedCopyFunc = CFUNCTYPE(gpointer, gpointer)
# GBoxedFreeFunc = CFUNCTYPE(None, gpointer)
# g_boxed_copy = ctypes_get_func(
# libgobject,
# 'g_boxed_copy',
# gpointer,
# GType,
# gconstpointer,
# )
# g_boxed_free = ctypes_get_func(
# libgobject,
# 'g_boxed_free',
# None,
# GType,
# gpointer,
# )
# g_boxed_type_register_static = ctypes_get_func(
# libgobject,
# 'g_boxed_type_register_static',
# GType,
# gchar_p,
# GBoxedCopyFunc,
# GBoxedFreeFunc,
# )
# g_pointer_type_register_static = ctypes_get_func(
# libgobject,
# 'g_pointer_type_register_static',
# GType,
# gchar_p,
# )
g_closure_get_type = ctypes_get_func(
libgobject,
'g_closure_get_type',
GType,
)
g_value_get_type = ctypes_get_func(
libgobject,
'g_value_get_type',
GType,
)
g_value_array_get_type = ctypes_get_func(
libgobject,
'g_value_array_get_type',
GType,
)
# g_date_get_type = ctypes_get_func(
# libgobject,
# 'g_date_get_type',
# GType,
# )
# g_strv_get_type = ctypes_get_func(
# libgobject,
# 'g_strv_get_type',
# GType,
# )
# g_gstring_get_type = ctypes_get_func(
# libgobject,
# 'g_gstring_get_type',
# GType,
# )
# g_hash_table_get_type = ctypes_get_func(
# libgobject,
# 'g_hash_table_get_type',
# GType,
# )
# g_array_get_type = ctypes_get_func(
# libgobject,
# 'g_array_get_type',
# GType,
# )
# g_byte_array_get_type = ctypes_get_func(
# libgobject,
# 'g_byte_array_get_type',
# GType,
# )
# g_ptr_array_get_type = ctypes_get_func(
# libgobject,
# 'g_ptr_array_get_type',
# GType,
# )
# g_variant_type_get_gtype = ctypes_get_func(
# libgobject,
# 'g_variant_type_get_gtype',
# GType,
# )
# g_variant_get_gtype = ctypes_get_func(
# libgobject,
# 'g_variant_get_gtype',
# GType,
# )
# g_regex_get_type = ctypes_get_func(
# libgobject,
# 'g_regex_get_type',
# GType,
# )
# g_error_get_type = ctypes_get_func(
# libgobject,
# 'g_error_get_type',
# GType,
# )
#
# GValue
#
# g_value_init = ctypes_get_func(
# libgobject,
# 'g_value_init',
# POINTER(GValue),
# POINTER(GValue),
# GType,
# )
# g_value_copy = ctypes_get_func(
# libgobject,
# 'g_value_copy',
# None,
# POINTER(GValue),
# POINTER(GValue),
# )
# g_value_reset = ctypes_get_func(
# libgobject,
# 'g_value_reset',
# POINTER(GValue),
# POINTER(GValue),
# )
# g_value_unset = ctypes_get_func(
# libgobject,
# 'g_value_unset',
# None,
# POINTER(GValue),
# )
# g_value_set_instance = ctypes_get_func(
# libgobject,
# 'g_value_set_instance',
# None,
# POINTER(GValue),
# gpointer,
# )
# g_value_fits_pointer = ctypes_get_func(
# libgobject,
# 'g_value_fits_pointer',
# gboolean,
# POINTER(GValue),
# )
# g_value_peek_pointer = ctypes_get_func(
# libgobject,
# 'g_value_peek_pointer',
# gboolean,
# POINTER(GValue),
# )
# g_value_type_compatible = ctypes_get_func(
# libgobject,
# 'g_value_type_compatible',
# gboolean,
# GType,
# GType,
# )
# g_value_type_transformable = ctypes_get_func(
# libgobject,
# 'g_value_type_transformable',
# gboolean,
# GType,
# GType,
# )
# g_value_transform = ctypes_get_func(
# libgobject,
# 'g_value_transform',
# gboolean,
# POINTER(GValue),
# POINTER(GValue),
# )
# GValueTransform = CFUNCTYPE(None, POINTER(GValue), POINTER(GValue))
# g_value_register_transform_func = ctypes_get_func(
# libgobject,
# 'g_value_register_transform_func',
# None,
# GType,
# GType,
# GValueTransform,
# )
# g_strdup_value_contents = ctypes_get_func(
# libgobject,
# 'g_strdup_value_contents',
# gchar_p,
# POINTER(GValue),
# )
#
# GParamSpec/GValue
#
# g_param_spec_boolean = ctypes_get_func(
# libgobject,
# 'g_param_spec_boolean',
# POINTER(GParamSpec),
# gchar_p,
# gchar_p,
# gchar_p,
# gboolean,
# GParamFlags,
# )
g_value_set_boolean = ctypes_get_func(
libgobject,
'g_value_set_boolean',
None,
POINTER(GValue),
gboolean,
)
g_value_get_boolean = ctypes_get_func(
libgobject,
'g_value_get_boolean',
gboolean,
POINTER(GValue),
)
# g_param_spec_char = ctypes_get_func(
# libgobject,
# 'g_param_spec_char',
# POINTER(GParamSpec),
# gchar_p,
# gchar_p,
# gchar_p,
# gint8,
# gint8,
# gint8,
# GParamFlags,
# )
g_value_set_char = ctypes_get_func(
libgobject,
'g_value_set_char',
None,
POINTER(GValue),
gchar,
)
g_value_get_char = ctypes_get_func(
libgobject,
'g_value_get_char',
gchar,
POINTER(GValue),
)
# g_param_spec_uchar = ctypes_get_func(
# libgobject,
# 'g_param_spec_uchar',
# POINTER(GParamSpec),
# gchar_p,
# gchar_p,
# gchar_p,
# guint8,
# guint8,
# guint8,
# GParamFlags,
# )
g_value_set_uchar = ctypes_get_func(
libgobject,
'g_value_set_uchar',
None,
POINTER(GValue),
guchar,
)
g_value_get_uchar = ctypes_get_func(
libgobject,
'g_value_get_uchar',
guchar,
POINTER(GValue),
)
# g_param_spec_int = ctypes_get_func(
# libgobject,
# 'g_param_spec_int',
# POINTER(GParamSpec),
# gchar_p,
# gchar_p,
# gchar_p,
# gint,
# gint,
# gint,
# GParamFlags,
# )
g_value_set_int = ctypes_get_func(
libgobject,
'g_value_set_int',
None,
POINTER(GValue),
gint,
)
g_value_get_int = ctypes_get_func(
libgobject,
'g_value_get_int',
gint,
POINTER(GValue),
)
# g_param_spec_uint = ctypes_get_func(
# libgobject,
# 'g_param_spec_uint',
# POINTER(GParamSpec),
# gchar_p,
# gchar_p,
# gchar_p,
# guint,
# guint,
# guint,
# GParamFlags,
# )
g_value_set_uint = ctypes_get_func(
libgobject,
'g_value_set_uint',
None,
POINTER(GValue),
guint,
)
g_value_get_uint = ctypes_get_func(
libgobject,
'g_value_get_uint',
guint,
POINTER(GValue),
)
# g_param_spec_long = ctypes_get_func(
# libgobject,
# 'g_param_spec_long',
# POINTER(GParamSpec),
# gchar_p,
# gchar_p,
# gchar_p,
# glong,
# glong,
# glong,
# GParamFlags,
# )
g_value_set_long = ctypes_get_func(
libgobject,
'g_value_set_long',
None,
POINTER(GValue),
glong,
)
g_value_get_long = ctypes_get_func(
libgobject,
'g_value_get_long',
glong,
POINTER(GValue),
)
# g_param_spec_ulong = ctypes_get_func(
# libgobject,
# 'g_param_spec_ulong',
# POINTER(GParamSpec),
# gchar_p,
# gchar_p,
# gchar_p,
# gulong,
# gulong,
# gulong,
# GParamFlags,
# )
g_value_set_ulong = ctypes_get_func(
libgobject,
'g_value_set_ulong',
None,
POINTER(GValue),
gulong,
)
g_value_get_ulong = ctypes_get_func(
libgobject,
'g_value_get_ulong',
gulong,
POINTER(GValue),
)
# g_param_spec_int64 = ctypes_get_func(
# libgobject,
# 'g_param_spec_int64',
# POINTER(GParamSpec),
# gchar_p,
# gchar_p,
# gchar_p,
# gint64,
# gint64,
# gint64,
# GParamFlags,
# )
g_value_set_int64 = ctypes_get_func(
libgobject,
'g_value_set_int64',
None,
POINTER(GValue),
gint64,
)
g_value_get_int64 = ctypes_get_func(
libgobject,
'g_value_get_int64',
gint64,
POINTER(GValue),
)
# g_param_spec_uint64 = ctypes_get_func(
# libgobject,
# 'g_param_spec_uint64',
# POINTER(GParamSpec),
# gchar_p,
# gchar_p,
# gchar_p,
# guint64,
# guint64,
# guint64,
# GParamFlags,
# )
g_value_set_uint64 = ctypes_get_func(
libgobject,
'g_value_set_uint64',
None,
POINTER(GValue),
guint64,
)
g_value_get_uint64 = ctypes_get_func(
libgobject,
'g_value_get_uint64',
guint64,
POINTER(GValue),
)
# g_param_spec_float = ctypes_get_func(
# libgobject,
# 'g_param_spec_float',
# POINTER(GParamSpec),
# gchar_p,
# gchar_p,
# gchar_p,
# gfloat,
# gfloat,
# gfloat,
# GParamFlags,
# )
g_value_set_float = ctypes_get_func(
libgobject,
'g_value_set_float',
None,
POINTER(GValue),
gfloat,
)
g_value_get_float = ctypes_get_func(
libgobject,
'g_value_get_float',
gfloat,
POINTER(GValue),
)
# g_param_spec_double = ctypes_get_func(
# libgobject,
# 'g_param_spec_double',
# POINTER(GParamSpec),
# gchar_p,
# gchar_p,
# gchar_p,
# gdouble,
# gdouble,
# gdouble,
# GParamFlags,
# )
g_value_set_double = ctypes_get_func(
libgobject,
'g_value_set_double',
None,
POINTER(GValue),
gdouble,
)
g_value_get_double = ctypes_get_func(
libgobject,
'g_value_get_double',
gdouble,
POINTER(GValue),
)
# g_param_spec_enum = ctypes_get_func(
# libgobject,
# 'g_param_spec_enum',
# POINTER(GParamSpec),
# gchar_p,
# gchar_p,
# gchar_p,
# GType,
# gint,
# GParamFlags,
# )
g_value_set_enum = ctypes_get_func(
libgobject,
'g_value_set_enum',
None,
POINTER(GValue),
gint,
)
g_value_get_enum = ctypes_get_func(
libgobject,
'g_value_get_enum',
gint,
POINTER(GValue),
)
# g_param_spec_flags = ctypes_get_func(
# libgobject,
# 'g_param_spec_flags',
# POINTER(GParamSpec),
# gchar_p,
# gchar_p,
# gchar_p,
# GType,
# guint,
# GParamFlags,
# )
g_value_set_flags = ctypes_get_func(
libgobject,
'g_value_set_flags',
None,
POINTER(GValue),
guint,
)
g_value_get_flags = ctypes_get_func(
libgobject,
'g_value_get_flags',
guint,
POINTER(GValue),
)
# g_param_spec_string = ctypes_get_func(
# libgobject,
# 'g_param_spec_string',
# POINTER(GParamSpec),
# gchar_p,
# gchar_p,
# gchar_p,
# gchar_p,
# GParamFlags,
# )
g_value_set_string = ctypes_get_func(
libgobject,
'g_value_set_string',
None,
POINTER(GValue),
gchar_p,
)
g_value_set_static_string = ctypes_get_func(
libgobject,
'g_value_set_static_string',
None,
POINTER(GValue),
gchar_p,
)
g_value_take_string = ctypes_get_func(
libgobject,
'g_value_take_string',
None,
POINTER(GValue),
gchar_p,
)
g_value_set_string_take_ownership = ctypes_get_func(
libgobject,
'g_value_set_string_take_ownership',
None,
POINTER(GValue),
gchar_p,
)
g_value_get_string = ctypes_get_func(
libgobject,
'g_value_get_string',
gchar_p,
POINTER(GValue),
)
# g_value_dup_string = ctypes_get_func(
# libgobject,
# 'g_value_dup_string',
# gchar_p,
# POINTER(GValue),
# )
# g_param_spec_param = ctypes_get_func(
# libgobject,
# 'g_param_spec_param',
# POINTER(GParamSpec),
# gchar_p,
# gchar_p,
# gchar_p,
# GType,
# GParamFlags,
# )
g_value_set_param = ctypes_get_func(
libgobject,
'g_value_set_param',
None,
POINTER(GValue),
POINTER(GParamSpec),
)
g_value_take_param = ctypes_get_func(
libgobject,
'g_value_take_param',
None,
POINTER(GValue),
POINTER(GParamSpec),
)
g_value_set_param_take_ownership = ctypes_get_func(
libgobject,
'g_value_set_param_take_ownership',
None,
POINTER(GValue),
POINTER(GParamSpec),
)
g_value_get_param = ctypes_get_func(
libgobject,
'g_value_get_param',
POINTER(GParamSpec),
POINTER(GValue),
)
# g_value_dup_param = ctypes_get_func(
# libgobject,
# 'g_value_dup_param',
# POINTER(GParamSpec),
# POINTER(GValue),
# )
# g_param_spec_boxed = ctypes_get_func(
# libgobject,
# 'g_param_spec_boxed',
# POINTER(GParamSpec),
# gchar_p,
# gchar_p,
# gchar_p,
# GType,
# GParamFlags,
# )
g_value_set_boxed = ctypes_get_func(
libgobject,
'g_value_set_boxed',
None,
POINTER(GValue),
gconstpointer,
)
g_value_set_static_boxed = ctypes_get_func(
libgobject,
'g_value_set_static_boxed',
None,
POINTER(GValue),
gconstpointer,
)
g_value_take_boxed = ctypes_get_func(
libgobject,
'g_value_take_boxed',
None,
POINTER(GValue),
gconstpointer,
)
g_value_set_boxed_take_ownership = ctypes_get_func(
libgobject,
'g_value_set_boxed_take_ownership',
None,
POINTER(GValue),
gconstpointer,
)
g_value_get_boxed = ctypes_get_func(
libgobject,
'g_value_get_boxed',
gpointer,
POINTER(GValue),
)
# g_value_dup_boxed = ctypes_get_func(
# libgobject,
# 'g_value_dup_boxed',
# gpointer,
# POINTER(GValue),
# )
# g_param_spec_pointer = ctypes_get_func(
# libgobject,
# 'g_param_spec_pointer',
# POINTER(GParamSpec),
# gchar_p,
# gchar_p,
# gchar_p,
# GParamFlags,
# )
g_value_set_pointer = ctypes_get_func(
libgobject,
'g_value_set_pointer',
None,
POINTER(GValue),
gpointer,
)
g_value_get_pointer = ctypes_get_func(
libgobject,
'g_value_get_pointer',
gpointer,
POINTER(GValue),
)
g_gtype_get_type = ctypes_get_func(
libgobject,
'g_gtype_get_type',
GType,
)
# g_param_spec_object = ctypes_get_func(
# libgobject,
# 'g_param_spec_object',
# POINTER(GParamSpec),
# gchar_p,
# gchar_p,
# gchar_p,
# GType,
# GParamFlags,
# )
g_value_set_object = ctypes_get_func(
libgobject,
'g_value_set_object',
None,
POINTER(GValue),
gpointer,
)
g_value_take_object = ctypes_get_func(
libgobject,
'g_value_take_object',
None,
POINTER(GValue),
gpointer,
)
g_value_set_object_take_ownership = ctypes_get_func(
libgobject,
'g_value_set_object_take_ownership',
None,
POINTER(GValue),
gpointer,
)
g_value_get_object = ctypes_get_func(
libgobject,
'g_value_get_object',
gpointer,
POINTER(GValue),
)
# g_value_dup_object = ctypes_get_func(
# libgobject,
# 'g_value_dup_object',
# gpointer,
# POINTER(GValue),
# )
# g_param_spec_unichar = ctypes_get_func(
# libgobject,
# 'g_param_spec_unichar',
# POINTER(GParamSpec),
# gchar_p,
# gchar_p,
# gchar_p,
# gunichar,
# GParamFlags,
# )
# g_param_spec_value_array = ctypes_get_func(
# libgobject,
# 'g_param_spec_value_array',
# POINTER(GParamSpec),
# gchar_p,
# gchar_p,
# gchar_p,
# POINTER(GParamSpec),
# GParamFlags,
# )
# g_param_spec_override = ctypes_get_func(
# libgobject,
# 'g_param_spec_override',
# POINTER(GParamSpec),
# gchar_p,
# POINTER(GParamSpec),
# )
# g_param_spec_gtype = ctypes_get_func(
# libgobject,
# 'g_param_spec_gtype',
# POINTER(GParamSpec),
# gchar_p,
# gchar_p,
# gchar_p,
# GType,
# GParamFlags,
# )
g_value_get_gtype = ctypes_get_func(
libgobject,
'g_value_get_gtype',
GType,
POINTER(GValue),
)
g_value_set_gtype = ctypes_get_func(
libgobject,
'g_value_set_gtype',
None,
POINTER(GValue),
GType,
)
# g_param_spec_variant = ctypes_get_func(
# libgobject,
# 'g_param_spec_variant',
# POINTER(GParamSpec),
# gchar_p,
# gchar_p,
# gchar_p,
# POINTER(GVariantType),
# POINTER(GVariant),
# GParamFlags,
# )
g_value_get_variant = ctypes_get_func(
libgobject,
'g_value_get_variant',
POINTER(GVariant),
POINTER(GValue),
)
# g_value_dup_variant = ctypes_get_func(
# libgobject,
# 'g_value_dup_variant',
# POINTER(GVariant),
# POINTER(GValue),
# )
g_value_set_variant = ctypes_get_func(
libgobject,
'g_value_set_variant',
None,
POINTER(GValue),
POINTER(GVariant),
)
# g_value_take_variant = ctypes_get_func(
# libgobject,
# 'g_value_take_variant',
# None,
# POINTER(GValue),
# POINTER(GVariant),
# )
#
# GParamSpec
#
# g_param_spec_ref = ctypes_get_func(
# libgobject,
# 'g_param_spec_ref',
# POINTER(GParamSpec),
# POINTER(GParamSpec),
# )
# g_param_spec_unref = ctypes_get_func(
# libgobject,
# 'g_param_spec_unref',
# None,
# POINTER(GParamSpec),
# )
# g_param_spec_sink = ctypes_get_func(
# libgobject,
# 'g_param_spec_sink',
# None,
# POINTER(GParamSpec),
# )
# g_param_spec_ref_sink = ctypes_get_func(
# libgobject,
# 'g_param_spec_ref_sink',
# POINTER(GParamSpec),
# POINTER(GParamSpec),
# )
# g_param_value_set_default = ctypes_get_func(
# libgobject,
# 'g_param_value_set_default',
# None,
# POINTER(GParamSpec),
# POINTER(GValue),
# )
# g_param_value_defaults = ctypes_get_func(
# libgobject,
# 'g_param_value_defaults',
# gboolean,
# POINTER(GParamSpec),
# POINTER(GValue),
# )
# g_param_value_validate = ctypes_get_func(
# libgobject,
# 'g_param_value_validate',
# gboolean,
# POINTER(GParamSpec),
# POINTER(GValue),
# )
# g_param_value_convert = ctypes_get_func(
# libgobject,
# 'g_param_value_convert',
# gboolean,
# POINTER(GParamSpec),
# POINTER(GValue),
# POINTER(GValue),
# gboolean,
# )
# g_param_values_cmp = ctypes_get_func(
# libgobject,
# 'g_param_values_cmp',
# gint,
# POINTER(GParamSpec),
# POINTER(GValue),
# POINTER(GValue),
# )
# g_param_spec_get_name = ctypes_get_func(
# libgobject,
# 'g_param_spec_get_name',
# gchar,
# POINTER(GParamSpec),
# )
# g_param_spec_get_nick = ctypes_get_func(
# libgobject,
# 'g_param_spec_get_nick',
# gchar,
# POINTER(GParamSpec),
# )
# g_param_spec_get_blurb = ctypes_get_func(
# libgobject,
# 'g_param_spec_get_blurb',
# gchar,
# POINTER(GParamSpec),
# )
# g_param_spec_get_qdata = ctypes_get_func(
# libgobject,
# 'g_param_spec_get_qdata',
# gpointer,
# POINTER(GParamSpec),
# GQuark,
# )
# g_param_spec_set_qdata = ctypes_get_func(
# libgobject,
# 'g_param_spec_set_qdata',
# None,
# POINTER(GParamSpec),
# GQuark,
# gpointer,
# )
# g_param_spec_set_qdata_full = ctypes_get_func(
# libgobject,
# 'g_param_spec_set_qdata_full',
# None,
# POINTER(GParamSpec),
# GQuark,
# gpointer,
# GDestroyNotify,
# )
# g_param_spec_steal_qdata = ctypes_get_func(
# libgobject,
# 'g_param_spec_steal_qdata',
# gpointer,
# POINTER(GParamSpec),
# GQuark,
# )
# g_param_spec_get_redirect_target = ctypes_get_func(
# libgobject,
# 'g_param_spec_get_redirect_target',
# POINTER(GParamSpec),
# POINTER(GParamSpec),
# )
# g_param_spec_internal
# File: office365_connector.py
#
# Copyright (c) 2017-2022 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
#
#
# Phantom App imports
import base64
import grp
import json
import os
import pwd
import sys
import time
import uuid
from datetime import datetime, timedelta
import phantom.app as phantom
import requests
from bs4 import BeautifulSoup, UnicodeDammit
from django.http import HttpResponse
from phantom.action_result import ActionResult
from phantom.base_connector import BaseConnector
from phantom.vault import Vault
from office365_consts import *
from process_email import ProcessEmail
TC_FILE = "oauth_task.out"
SERVER_TOKEN_URL = "https://login.microsoftonline.com/{0}/oauth2/v2.0/token"
MSGRAPH_API_URL = "https://graph.microsoft.com/v1.0"
MAX_END_OFFSET_VAL = 2147483646
class ReturnException(Exception):
pass
class RetVal(tuple):
def __new__(cls, val1, val2):
return tuple.__new__(RetVal, (val1, val2))
def _load_app_state(asset_id, app_connector=None):
""" This function is used to load the current state file.
:param asset_id: asset_id
:param app_connector: Object of app_connector class
:return: state: Current state file as a dictionary
"""
asset_id = str(asset_id)
if not asset_id or not asset_id.isalnum():
if app_connector:
app_connector.debug_print('In _load_app_state: Invalid asset_id')
return {}
app_dir = os.path.dirname(os.path.abspath(__file__))
state_file = '{0}/{1}_state.json'.format(app_dir, asset_id)
real_state_file_path = os.path.abspath(state_file)
if not os.path.dirname(real_state_file_path) == app_dir:
if app_connector:
app_connector.debug_print('In _load_app_state: Invalid asset_id')
return {}
state = {}
try:
with open(real_state_file_path, 'r') as state_file_obj:
state_file_data = state_file_obj.read()
state = json.loads(state_file_data)
except Exception as e:
if app_connector:
# Fetching the Python major version
try:
python_version = int(sys.version_info[0])
except:
app_connector.debug_print("Error occurred while getting the Phantom server's Python major version.")
return state
error_code, error_msg = _get_error_message_from_exception(python_version, e, app_connector)
app_connector.debug_print('In _load_app_state: Error Code: {0}. Error Message: {1}'.format(error_code, error_msg))
if app_connector:
app_connector.debug_print('Loaded state: ', state)
return state
def _save_app_state(state, asset_id, app_connector):
""" This function is used to save current state in file.
:param state: Dictionary which contains data to write in state file
:param asset_id: asset_id
:param app_connector: Object of app_connector class
:return: status: phantom.APP_SUCCESS
"""
asset_id = str(asset_id)
if not asset_id or not asset_id.isalnum():
if app_connector:
app_connector.debug_print('In _save_app_state: Invalid asset_id')
return {}
    app_dir = os.path.dirname(os.path.abspath(__file__))
state_file = '{0}/{1}_state.json'.format(app_dir, asset_id)
real_state_file_path = os.path.abspath(state_file)
if not os.path.dirname(real_state_file_path) == app_dir:
if app_connector:
app_connector.debug_print('In _save_app_state: Invalid asset_id')
return {}
if app_connector:
app_connector.debug_print('Saving state: ', state)
try:
with open(real_state_file_path, 'w+') as state_file_obj:
state_file_obj.write(json.dumps(state))
except Exception as e:
# Fetching the Python major version
try:
python_version = int(sys.version_info[0])
except:
if app_connector:
app_connector.debug_print("Error occurred while getting the Phantom server's Python major version.")
return phantom.APP_ERROR
error_code, error_msg = _get_error_message_from_exception(python_version, e, app_connector)
if app_connector:
app_connector.debug_print('Unable to save state file: Error Code: {0}. Error Message: {1}'.format(error_code, error_msg))
print('Unable to save state file: Error Code: {0}. Error Message: {1}'.format(error_code, error_msg))
return phantom.APP_ERROR
return phantom.APP_SUCCESS
def _handle_py_ver_compat_for_input_str(python_version, input_str, app_connector=None):
"""
This method returns the encoded|original string based on the Python version.
:param input_str: Input string to be processed
:return: input_str (Processed input string based on following logic 'input_str - Python 3; encoded input_str - Python 2')
"""
try:
if input_str and python_version < 3:
input_str = UnicodeDammit(input_str).unicode_markup.encode('utf-8')
except:
if app_connector:
app_connector.debug_print("Error occurred while handling python 2to3 compatibility for the input string")
return input_str
def _get_error_message_from_exception(python_version, e, app_connector=None):
""" This function is used to get appropriate error message from the exception.
:param e: Exception object
:return: error message
"""
error_msg = "Unknown error occurred. Please check the asset configuration and|or action parameters."
try:
if e.args:
if len(e.args) > 1:
error_code = e.args[0]
error_msg = e.args[1]
elif len(e.args) == 1:
error_code = "Error code unavailable"
error_msg = e.args[0]
else:
error_code = "Error code unavailable"
error_msg = "Unknown error occurred. Please check the asset configuration and|or action parameters."
except:
error_code = "Error code unavailable"
error_msg = "Unknown error occurred. Please check the asset configuration and|or action parameters."
try:
error_msg = _handle_py_ver_compat_for_input_str(python_version, error_msg, app_connector)
except TypeError:
error_msg = "Error occurred while handling python 2to3 compatibility for the input string"
except:
error_msg = "Unknown error occurred. Please check the asset configuration and|or action parameters."
return error_code, error_msg
def _handle_oauth_result(request, path_parts):
"""
<base_url>?admin_consent=True&tenant=a417c578-c7ee-480d-a225-d48057e74df5&state=13
"""
asset_id = request.GET.get('state')
if not asset_id:
return HttpResponse("ERROR: Asset ID not found in URL\n{0}".format(json.dumps(request.GET)), content_type="text/plain", status=400)
# first check for error info
error = request.GET.get('error')
error_description = request.GET.get('error_description')
if error:
message = "Error: {0}".format(error)
if error_description:
message += " Details: {0}".format(error_description)
return HttpResponse("Server returned {0}".format(message), content_type="text/plain", status=400)
    admin_consent = request.GET.get('admin_consent')
    code = request.GET.get('code')
    if not admin_consent and not code:
return HttpResponse("ERROR: admin_consent or authorization code not found in URL\n{0}".format(
json.dumps(request.GET)), content_type="text/plain", status=400)
# Load the data
state = _load_app_state(asset_id)
if admin_consent:
if admin_consent == 'True':
admin_consent = True
else:
admin_consent = False
state['admin_consent'] = admin_consent
_save_app_state(state, asset_id, None)
# If admin_consent is True
if admin_consent:
return HttpResponse('Admin Consent received. Please close this window.', content_type="text/plain")
return HttpResponse('Admin Consent declined. Please close this window and try again later.', content_type="text/plain", status=400)
# If value of admin_consent is not available, value of code is available
state['code'] = code
_save_app_state(state, asset_id, None)
return HttpResponse('Code received. Please close this window, the action will continue to get new token.', content_type="text/plain")
def _handle_oauth_start(request, path_parts):
# get the asset id, the state file is created for each asset
asset_id = request.GET.get('asset_id')
if not asset_id:
return HttpResponse("ERROR: Asset ID not found in URL", content_type="text/plain", status=404)
# Load the state that was created for the asset
state = _load_app_state(asset_id)
if not state:
return HttpResponse('ERROR: Invalid asset_id', content_type="text/plain", status=400)
# get the url to point to the authorize url of OAuth
admin_consent_url = state.get('admin_consent_url')
if not admin_consent_url:
return HttpResponse("App state is invalid, admin_consent_url key not found", content_type="text/plain", status=400)
# Redirect to this link, the user will then require to enter credentials interactively
response = HttpResponse(status=302)
response['Location'] = admin_consent_url
return response
def handle_request(request, path_parts):
"""
request contains the data posted to the rest endpoint, it is the django http request object
path_parts is a list of the URL tokenized
"""
# get the type of data requested, it's the last part of the URL used to post to the REST endpoint
if len(path_parts) < 2:
return HttpResponse('error: True, message: Invalid REST endpoint request', content_type="text/plain", status=404)
call_type = path_parts[1]
if call_type == 'start_oauth':
# start the authentication process
return _handle_oauth_start(request, path_parts)
if call_type == 'result':
# process the 'code'
ret_val = _handle_oauth_result(request, path_parts)
asset_id = request.GET.get('state')
if asset_id and asset_id.isalnum():
app_dir = os.path.dirname(os.path.abspath(__file__))
auth_status_file_path = '{0}/{1}_{2}'.format(app_dir, asset_id, TC_FILE)
real_auth_status_file_path = os.path.abspath(auth_status_file_path)
if not os.path.dirname(real_auth_status_file_path) == app_dir:
return HttpResponse("Error: Invalid asset_id", content_type="text/plain", status=400)
open(auth_status_file_path, 'w').close()
try:
uid = pwd.getpwnam("apache").pw_uid
gid = grp.getgrnam("phantom").gr_gid
os.chown(auth_status_file_path, uid, gid)
                os.chmod(auth_status_file_path, 0o664)
except:
pass
return ret_val
"""
if call_type == 'refresh_token':
return _handle_oauth_refresh_token(request, path_parts)
"""
return HttpResponse('error: Invalid endpoint', content_type="text/plain", status=404)
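# Illustrative request flow (assumption, for orientation only): Phantom routes
# REST calls for this app to handle_request(), e.g.
#   .../start_oauth?asset_id=<asset_id>  -> redirect to the stored admin-consent URL
#   .../result?state=<asset_id>&code=... -> persist the OAuth code and drop the TC_FILE marker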
def _get_dir_name_from_app_name(app_name):
app_name = ''.join([x for x in app_name if x.isalnum()])
app_name = app_name.lower()
if not app_name:
# hardcode it
app_name = "app_for_phantom"
return app_name
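# Example (illustrative): _get_dir_name_from_app_name("MS Graph for Office 365")
# returns "msgraphforoffice365"; a name with no alphanumeric characters falls
# back to "app_for_phantom".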
class Office365Connector(BaseConnector):
def __init__(self):
# Call the BaseConnectors init first
super(Office365Connector, self).__init__()
self._state = None
# Variable to hold a base_url in case the app makes REST calls
# Do note that the app json defines the asset config, so please
# modify this as you deem fit.
self._base_url = None
self._tenant = None
self._client_id = None
self._client_secret = None
self._admin_access = None
self._scope = None
self._access_token = None
self._refresh_token = None
self._REPLACE_CONST = "C53CEA8298BD401BA695F247633D0542"
def _process_empty_reponse(self, response, action_result):
if response.status_code == 200:
return RetVal(phantom.APP_SUCCESS, {})
return RetVal(action_result.set_status(phantom.APP_ERROR, "Empty response and no information in the header"), None)
def _process_html_response(self, response, action_result):
# An html response, treat it like an error
status_code = response.status_code
try:
soup = BeautifulSoup(response.text, "html.parser")
# Remove the script, style, footer and navigation part from the HTML message
for element in soup(["script", "style", "footer", "nav"]):
element.extract()
error_text = soup.text
split_lines = error_text.split('\n')
split_lines = [x.strip() for x in split_lines if x.strip()]
error_text = '\n'.join(split_lines)
except:
error_text = "Cannot parse error details"
try:
error_text = _handle_py_ver_compat_for_input_str(self._python_version, error_text, self)
except TypeError:
error_text = "Error occurred while handling python 2to3 compatibility for the error string"
except:
error_text = "Unknown | |
# File: dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/tests/test_looplifting.py
from __future__ import print_function, division, absolute_import
import numpy as np
from numba import types, utils
from numba import unittest_support as unittest
from numba.compiler import compile_isolated, Flags
from .support import TestCase, tag, MemoryLeakMixin
looplift_flags = Flags()
looplift_flags.set("enable_pyobject")
looplift_flags.set("enable_looplift")
pyobject_looplift_flags = looplift_flags.copy()
pyobject_looplift_flags.set("enable_pyobject_looplift")
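# The first flag set allows object mode and loop-lifting; enable_pyobject_looplift
# additionally lets a lifted loop itself fall back to object mode instead of
# requiring it to compile in nopython mode (see check_no_lift_nopython below).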
def lift1(x):
# Outer needs object mode because of np.empty()
a = np.empty(3)
for i in range(a.size):
# Inner is nopython-compliant
a[i] = x
return a
def lift2(x):
# Outer needs object mode because of np.empty()
a = np.empty((3, 4))
for i in range(a.shape[0]):
for j in range(a.shape[1]):
# Inner is nopython-compliant
a[i, j] = x
return a
def lift3(x):
# Output variable from the loop
_ = object()
a = np.arange(5, dtype=np.int64)
c = 0
for i in range(a.shape[0]):
c += a[i] * x
return c
def lift4(x):
# Output two variables from the loop
_ = object()
a = np.arange(5, dtype=np.int64)
c = 0
d = 0
for i in range(a.shape[0]):
c += a[i] * x
d += c
return c + d
def lift5(x):
_ = object()
a = np.arange(4)
for i in range(a.shape[0]):
# Inner has a break statement
if i > 2:
break
return a
def lift_gen1(x):
# Outer needs object mode because of np.empty()
a = np.empty(3)
yield 0
for i in range(a.size):
# Inner is nopython-compliant
a[i] = x
yield np.sum(a)
def lift_issue2561():
np.empty(1) # This forces objectmode because no nrt
for i in range(10):
for j in range(10):
return 1
return 2
def reject1(x):
a = np.arange(4)
for i in range(a.shape[0]):
# Inner returns a variable from outer "scope" => cannot loop-lift
return a
return a
def reject_gen1(x):
_ = object()
a = np.arange(4)
for i in range(a.shape[0]):
# Inner is a generator => cannot loop-lift
yield a[i]
def reject_gen2(x):
_ = object()
a = np.arange(3)
for i in range(a.size):
# Middle has a yield => cannot loop-lift
res = a[i] + x
for j in range(i):
# Inner is nopython-compliant, but the current algorithm isn't
# able to separate it.
res = res ** 2
yield res
def reject_npm1(x):
a = np.empty(3, dtype=np.int32)
for i in range(a.size):
# Inner uses object() => cannot loop-lift
_ = object()
a[i] = np.arange(i + 1)[i]
return a
class TestLoopLifting(MemoryLeakMixin, TestCase):
def try_lift(self, pyfunc, argtypes):
cres = compile_isolated(pyfunc, argtypes,
flags=looplift_flags)
# One lifted loop
self.assertEqual(len(cres.lifted), 1)
return cres
def assert_lifted_native(self, cres):
# Check if we have lifted in nopython mode
jitloop = cres.lifted[0]
[loopcres] = jitloop.overloads.values()
self.assertTrue(loopcres.fndesc.native) # Lifted function is native
def check_lift_ok(self, pyfunc, argtypes, args):
"""
Check that pyfunc can loop-lift even in nopython mode.
"""
cres = self.try_lift(pyfunc, argtypes)
expected = pyfunc(*args)
got = cres.entry_point(*args)
self.assert_lifted_native(cres)
# Check return values
self.assertPreciseEqual(expected, got)
def check_lift_generator_ok(self, pyfunc, argtypes, args):
"""
Check that pyfunc (a generator function) can loop-lift even in
nopython mode.
"""
cres = self.try_lift(pyfunc, argtypes)
expected = list(pyfunc(*args))
got = list(cres.entry_point(*args))
self.assert_lifted_native(cres)
# Check return values
self.assertPreciseEqual(expected, got)
def check_no_lift(self, pyfunc, argtypes, args):
"""
Check that pyfunc can't loop-lift.
"""
cres = compile_isolated(pyfunc, argtypes,
flags=looplift_flags)
self.assertFalse(cres.lifted)
expected = pyfunc(*args)
got = cres.entry_point(*args)
# Check return values
self.assertPreciseEqual(expected, got)
def check_no_lift_generator(self, pyfunc, argtypes, args):
"""
Check that pyfunc (a generator function) can't loop-lift.
"""
cres = compile_isolated(pyfunc, argtypes,
flags=looplift_flags)
self.assertFalse(cres.lifted)
expected = list(pyfunc(*args))
got = list(cres.entry_point(*args))
self.assertPreciseEqual(expected, got)
def check_no_lift_nopython(self, pyfunc, argtypes, args):
"""
Check that pyfunc will fail loop-lifting if pyobject mode
is disabled inside the loop, succeed otherwise.
"""
cres = compile_isolated(pyfunc, argtypes,
flags=looplift_flags)
self.assertTrue(cres.lifted)
with self.assertTypingError():
cres.entry_point(*args)
cres = compile_isolated(pyfunc, argtypes,
flags=pyobject_looplift_flags)
self.assertTrue(cres.lifted)
expected = pyfunc(*args)
got = cres.entry_point(*args)
self.assertPreciseEqual(expected, got)
def test_lift1(self):
self.check_lift_ok(lift1, (types.intp,), (123,))
def test_lift2(self):
self.check_lift_ok(lift2, (types.intp,), (123,))
def test_lift3(self):
self.check_lift_ok(lift3, (types.intp,), (123,))
@tag('important')
def test_lift4(self):
self.check_lift_ok(lift4, (types.intp,), (123,))
def test_lift5(self):
self.check_lift_ok(lift5, (types.intp,), (123,))
def test_lift_issue2561(self):
self.check_no_lift(lift_issue2561, (), ())
@tag('important')
def test_lift_gen1(self):
self.check_lift_generator_ok(lift_gen1, (types.intp,), (123,))
def test_reject1(self):
self.check_no_lift(reject1, (types.intp,), (123,))
def test_reject_gen1(self):
self.check_no_lift_generator(reject_gen1, (types.intp,), (123,))
def test_reject_gen2(self):
self.check_no_lift_generator(reject_gen2, (types.intp,), (123,))
def test_reject_npm1(self):
self.check_no_lift_nopython(reject_npm1, (types.intp,), (123,))
class TestLoopLiftingAnnotate(TestCase):
def test_annotate_1(self):
"""
Verify that annotation works as expected with one lifted loop
"""
from numba import jit
# dummy function to force objmode
def bar():
pass
def foo(x):
bar() # force obj
for i in range(x.size):
x[i] += 1
return x
cfoo = jit(foo)
x = np.arange(10)
xcopy = x.copy()
r = cfoo(x)
np.testing.assert_equal(r, xcopy + 1)
buf = utils.StringIO()
cfoo.inspect_types(file=buf)
annotation = buf.getvalue()
buf.close()
self.assertIn("The function contains lifted loops", annotation)
line = foo.__code__.co_firstlineno + 2 # 2 lines down from func head
self.assertIn("Loop at line {line}".format(line=line), annotation)
self.assertIn("Has 1 overloads", annotation)
def test_annotate_2(self):
"""
Verify that annotation works as expected with two lifted loops
"""
from numba import jit
# dummy function to force objmode
def bar():
pass
def foo(x):
bar() # force obj
# first lifted loop
for i in range(x.size):
x[i] += 1
# second lifted loop
for j in range(x.size):
x[j] *= 2
return x
cfoo = jit(foo)
x = np.arange(10)
xcopy = x.copy()
r = cfoo(x)
np.testing.assert_equal(r, (xcopy + 1) * 2)
buf = utils.StringIO()
cfoo.inspect_types(file=buf)
annotation = buf.getvalue()
buf.close()
self.assertIn("The function contains lifted loops", annotation)
line1 = foo.__code__.co_firstlineno + 3 # 3 lines down from func head
line2 = foo.__code__.co_firstlineno + 6 # 6 lines down from func head
self.assertIn("Loop at line {line}".format(line=line1), annotation)
self.assertIn("Loop at line {line}".format(line=line2), annotation)
class TestLoopLiftingInAction(MemoryLeakMixin, TestCase):
def assert_has_lifted(self, jitted, loopcount):
lifted = jitted.overloads[jitted.signatures[0]].lifted
self.assertEqual(len(lifted), loopcount)
def test_issue_734(self):
from numba import jit, void, int32, double
@jit(void(int32, double[:]), forceobj=True)
def forloop_with_if(u, a):
if u == 0:
for i in range(a.shape[0]):
a[i] = a[i] * 2.0
else:
for i in range(a.shape[0]):
a[i] = a[i] + 1.0
for u in (0, 1):
nb_a = np.arange(10, dtype='int32')
np_a = np.arange(10, dtype='int32')
forloop_with_if(u, nb_a)
forloop_with_if.py_func(u, np_a)
self.assertPreciseEqual(nb_a, np_a)
def test_issue_812(self):
from numba import jit
@jit('f8[:](f8[:])', forceobj=True)
def test(x):
res = np.zeros(len(x))
ind = 0
for ii in range(len(x)):
ind += 1
res[ind] = x[ind]
if x[ind] >= 10:
break
# Invalid loopjitting will miss the usage of `ind` in the
# following loop.
for ii in range(ind + 1, len(x)):
res[ii] = 0
return res
x = np.array([1., 4, 2, -3, 5, 2, 10, 5, 2, 6])
np.testing.assert_equal(test.py_func(x), test(x))
def test_issue_2368(self):
from numba import jit
def lift_issue2368(a, b):
s = 0
for e in a:
s += e
h = b.__hash__()
return s, h
a = np.ones(10)
b = object()
jitted = jit(lift_issue2368)
expected = lift_issue2368(a, b)
got = jitted(a, b)
self.assertEqual(expected[0], got[0])
self.assertEqual(expected[1], got[1])
jitloop = jitted.overloads[jitted.signatures[0]].lifted[0]
[loopcres] = jitloop.overloads.values()
# assert lifted function is native
self.assertTrue(loopcres.fndesc.native)
def test_no_iteration_w_redef(self):
# redefinition of res in the loop with no use of res should not
# prevent lifting
from numba import jit
@jit(forceobj=True)
def test(n):
res = 0
for i in range(n):
res = i
return res
# loop count = 1, loop lift but loop body not execute
self.assertEqual(test.py_func(-1), test(-1))
self.assert_has_lifted(test, loopcount=1)
# loop count = 1, loop will lift and will execute
self.assertEqual(test.py_func(1), test(1))
self.assert_has_lifted(test, loopcount=1)
def test_no_iteration(self):
from numba import jit
@jit(forceobj=True)
def test(n):
res = 0
for i in range(n):
res += i
return res
# loop count = 1
self.assertEqual(test.py_func(-1), test(-1))
self.assert_has_lifted(test, loopcount=1)
# loop count = 1
self.assertEqual(test.py_func(1), test(1))
self.assert_has_lifted(test, loopcount=1)
def test_define_in_loop_body(self):
# tests a definition in a loop that leaves the loop is liftable
from numba import jit
@jit(forceobj=True)
def test(n):
for i in range(n):
res = i
return res
# loop count = 1
self.assertEqual(test.py_func(1), test(1))
self.assert_has_lifted(test, loopcount=1)
def test_invalid_argument(self):
"""Test a problem caused by invalid discovery of loop argument
when a variable is used afterwards but not before.
Before the fix, this will result in::
numba.ir.NotDefinedError: 'i' is not defined
"""
from numba import jit
@jit(forceobj=True)
def test(arg):
if type(arg) == np.ndarray: # force object mode
if arg.ndim == 1:
result = 0.0
j = 0
for i in range(arg.shape[0]):
pass
else:
raise Exception
else:
result = 0.0
i, j = 0, 0
return result
arg = np.arange(10)
self.assertEqual(test.py_func(arg), test(arg))
def test_conditionally_defined_in_loop(self):
from numba import jit
@jit(forceobj=True)
def test():
x = 5
y = 0
for i | |
= []
except (InvalidRSEExpression, RSEWriteBlocked) as error:
rule.state = RuleState.STUCK
rule.error = (str(error)[:245] + '...') if len(str(error)) > 245 else str(error)
rule.save(session=session)
# Insert rule history
insert_rule_history(rule=rule, recent=True, longterm=False, session=session)
# Try to update the DatasetLocks
if rule.grouping != RuleGrouping.NONE:
session.query(models.DatasetLock).filter_by(rule_id=rule.id).update({'state': LockState.STUCK})
logger(logging.DEBUG, '%s while repairing rule %s', str(error), rule_id)
return
# Create the RSESelector
try:
rseselector = RSESelector(account=rule.account,
rses=target_rses,
weight=rule.weight,
copies=rule.copies,
ignore_account_limit=rule.ignore_account_limit,
session=session)
except (InvalidRuleWeight, InsufficientTargetRSEs, InsufficientAccountLimit) as error:
rule.state = RuleState.STUCK
rule.error = (str(error)[:245] + '...') if len(str(error)) > 245 else str(error)
rule.save(session=session)
# Insert rule history
insert_rule_history(rule=rule, recent=True, longterm=False, session=session)
# Try to update the DatasetLocks
if rule.grouping != RuleGrouping.NONE:
session.query(models.DatasetLock).filter_by(rule_id=rule.id).update({'state': LockState.STUCK})
logger(logging.DEBUG, '%s while repairing rule %s', type(error).__name__, rule_id)
return
# Reset the counters
logger(logging.DEBUG, "Resetting counters for rule %s [%d/%d/%d]", str(rule.id), rule.locks_ok_cnt, rule.locks_replicating_cnt, rule.locks_stuck_cnt)
rule.locks_ok_cnt = 0
rule.locks_replicating_cnt = 0
rule.locks_stuck_cnt = 0
rule_counts = session.query(models.ReplicaLock.state, func.count(models.ReplicaLock.state)).filter(models.ReplicaLock.rule_id == rule.id).group_by(models.ReplicaLock.state).all()
for count in rule_counts:
if count[0] == LockState.OK:
rule.locks_ok_cnt = count[1]
elif count[0] == LockState.REPLICATING:
rule.locks_replicating_cnt = count[1]
elif count[0] == LockState.STUCK:
rule.locks_stuck_cnt = count[1]
logger(logging.DEBUG, "Finished resetting counters for rule %s [%d/%d/%d]", str(rule.id), rule.locks_ok_cnt, rule.locks_replicating_cnt, rule.locks_stuck_cnt)
# Get the did
did = session.query(models.DataIdentifier).filter(models.DataIdentifier.scope == rule.scope,
models.DataIdentifier.name == rule.name).one()
# Detect if there is something wrong with the dataset and
# make the decisison on soft or hard repair.
hard_repair = False
if did.did_type != DIDType.FILE:
nr_files = rucio.core.did.get_did(scope=rule.scope, name=rule.name, dynamic=True, session=session)['length']
else:
nr_files = 1
if nr_files * rule.copies != (rule.locks_ok_cnt + rule.locks_stuck_cnt + rule.locks_replicating_cnt):
hard_repair = True
logger(logging.DEBUG, 'Repairing rule %s in HARD mode', str(rule.id))
elif rule.copies > 1 and rule.grouping == RuleGrouping.NONE:
hard_repair = True
logger(logging.DEBUG, 'Repairing rule %s in HARD mode', str(rule.id))
# Resolve the did to its contents
datasetfiles, locks, replicas, source_replicas = __resolve_did_to_locks_and_replicas(did=did,
nowait=True,
restrict_rses=[rse['id'] for rse in rses],
source_rses=[rse['id'] for rse in source_rses],
only_stuck=not hard_repair,
session=session)
session.flush()
# 1. Try to find missing locks and create them based on grouping
if did.did_type != DIDType.FILE and hard_repair:
try:
__find_missing_locks_and_create_them(datasetfiles=datasetfiles,
locks=locks,
replicas=replicas,
source_replicas=source_replicas,
rseselector=rseselector,
rule=rule,
source_rses=[rse['id'] for rse in source_rses],
session=session)
except (InsufficientAccountLimit, InsufficientTargetRSEs) as error:
rule.state = RuleState.STUCK
rule.error = (str(error)[:245] + '...') if len(str(error)) > 245 else str(error)
rule.save(session=session)
# Insert rule history
insert_rule_history(rule=rule, recent=True, longterm=False, session=session)
# Try to update the DatasetLocks
if rule.grouping != RuleGrouping.NONE:
session.query(models.DatasetLock).filter_by(rule_id=rule.id).update({'state': LockState.STUCK})
logger(logging.DEBUG, '%s while repairing rule %s', type(error).__name__, rule_id)
return
session.flush()
# 2. Try to find surplus locks and remove them
if hard_repair:
__find_surplus_locks_and_remove_them(datasetfiles=datasetfiles,
locks=locks,
replicas=replicas,
source_replicas=source_replicas,
rseselector=rseselector,
rule=rule,
source_rses=[rse['id'] for rse in source_rses],
session=session)
session.flush()
# 3. Try to find STUCK locks and repair them based on grouping
try:
__find_stuck_locks_and_repair_them(datasetfiles=datasetfiles,
locks=locks,
replicas=replicas,
source_replicas=source_replicas,
rseselector=rseselector,
rule=rule,
source_rses=[rse['id'] for rse in source_rses],
session=session)
except (InsufficientAccountLimit, InsufficientTargetRSEs) as error:
rule.state = RuleState.STUCK
rule.error = (str(error)[:245] + '...') if len(str(error)) > 245 else str(error)
rule.save(session=session)
# Insert rule history
insert_rule_history(rule=rule, recent=True, longterm=False, session=session)
# Try to update the DatasetLocks
if rule.grouping != RuleGrouping.NONE:
session.query(models.DatasetLock).filter_by(rule_id=rule.id).update({'state': LockState.STUCK})
logger(logging.DEBUG, '%s while repairing rule %s', type(error).__name__, rule_id)
return
# Delete Datasetlocks which are not relevant anymore
validated_datasetlock_rse_ids = [rse_id[0] for rse_id in session.query(models.ReplicaLock.rse_id).filter(models.ReplicaLock.rule_id == rule.id).group_by(models.ReplicaLock.rse_id).all()]
dataset_locks = session.query(models.DatasetLock).filter_by(rule_id=rule.id).all()
for dataset_lock in dataset_locks:
if dataset_lock.rse_id not in validated_datasetlock_rse_ids:
dataset_lock.delete(session=session)
if rule.locks_stuck_cnt != 0:
logger(logging.INFO, 'Rule %s [%d/%d/%d] state=STUCK', str(rule.id), rule.locks_ok_cnt, rule.locks_replicating_cnt, rule.locks_stuck_cnt)
rule.state = RuleState.STUCK
# Insert rule history
insert_rule_history(rule=rule, recent=True, longterm=False, session=session)
# Try to update the DatasetLocks
if rule.grouping != RuleGrouping.NONE:
session.query(models.DatasetLock).filter_by(rule_id=rule.id).update({'state': LockState.STUCK})
# TODO: Increase some kind of Stuck Counter here, The rule should at some point be SUSPENDED
return
rule.stuck_at = None
if rule.locks_replicating_cnt > 0:
logger(logging.INFO, 'Rule %s [%d/%d/%d] state=REPLICATING', str(rule.id), rule.locks_ok_cnt, rule.locks_replicating_cnt, rule.locks_stuck_cnt)
rule.state = RuleState.REPLICATING
rule.error = None
# Insert rule history
insert_rule_history(rule=rule, recent=True, longterm=False, session=session)
# Try to update the DatasetLocks
if rule.grouping != RuleGrouping.NONE:
session.query(models.DatasetLock).filter_by(rule_id=rule.id).update({'state': LockState.REPLICATING})
return
rule.state = RuleState.OK
rule.error = None
# Insert rule history
insert_rule_history(rule=rule, recent=True, longterm=False, session=session)
logger(logging.INFO, 'Rule %s [%d/%d/%d] state=OK', str(rule.id), rule.locks_ok_cnt, rule.locks_replicating_cnt, rule.locks_stuck_cnt)
if rule.grouping != RuleGrouping.NONE:
session.query(models.DatasetLock).filter_by(rule_id=rule.id).update({'state': LockState.OK})
session.flush()
if rule.notification == RuleNotification.YES:
generate_email_for_rule_ok_notification(rule=rule, session=session)
generate_rule_notifications(rule=rule, replicating_locks_before=0, session=session)
# Try to release potential parent rules
rucio.core.rule.release_parent_rule(child_rule_id=rule.id, session=session)
return
except NoResultFound:
# The rule has been deleted in the meanwhile
return
@read_session
def get_rule(rule_id, session=None):
"""
Get a specific replication rule.
:param rule_id: The rule_id to select.
:param session: The database session in use.
:raises: RuleNotFound if no Rule can be found.
"""
try:
rule = session.query(models.ReplicationRule).filter_by(id=rule_id).one()
d = {}
for column in rule.__table__.columns:
d[column.name] = getattr(rule, column.name)
return d
except NoResultFound:
raise RuleNotFound('No rule with the id %s found' % (rule_id))
except StatementError:
raise RucioException('Badly formatted rule id (%s)' % (rule_id))
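# Illustrative usage sketch (assumption, not part of the original module):
#
#     rule = get_rule('b1e7f9c2a4d84d2c9f0e3a5b6c7d8e9f')
#     update_rule(rule['id'], {'lifetime': 86400, 'comment': 'extended by ops'})
#
# 'lifetime' is interpreted in seconds; see the valid_options list in
# update_rule() below for the full set of accepted keys.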
@transactional_session
def update_rule(rule_id, options, session=None):
"""
Update a rules options.
:param rule_id: The rule_id to lock.
:param options: Dictionary of options
:param session: The database session in use.
:raises: RuleNotFound if no Rule can be found, InputValidationError if invalid option is used, ScratchDiskLifetimeConflict if wrong ScratchDiskLifetime is used.
"""
valid_options = ['comment', 'locked', 'lifetime', 'account', 'state', 'activity', 'source_replica_expression', 'cancel_requests', 'priority', 'child_rule_id', 'eol_at', 'meta', 'purge_replicas', 'boost_rule']
for key in options:
if key not in valid_options:
raise InputValidationError('%s is not a valid option to set.' % key)
try:
rule = session.query(models.ReplicationRule).filter_by(id=rule_id).one()
for key in options:
if key == 'lifetime':
# Check SCRATCHDISK Policy
vo = rule.account.vo
rses = parse_expression(rule.rse_expression, filter_={'vo': vo}, session=session)
try:
lifetime = get_scratch_policy(rule.account, rses, options['lifetime'], session=session)
except UndefinedPolicy:
lifetime = options['lifetime']
rule.expires_at = datetime.utcnow() + timedelta(seconds=lifetime) if lifetime is not None else None
if key == 'source_replica_expression':
rule.source_replica_expression = options['source_replica_expression']
if key == 'comment':
rule.comments = options['comment']
if key == 'activity':
validate_schema('activity', options['activity'], vo=rule.account.vo)
rule.activity = options['activity']
# Cancel transfers and re-submit them:
for lock in session.query(models.ReplicaLock).filter_by(rule_id=rule.id, state=LockState.REPLICATING).all():
transfers_to_cancel = request_core.cancel_request_did(scope=lock.scope, name=lock.name, dest_rse_id=lock.rse_id, session=session)
transfer_core.cancel_transfers(transfers_to_cancel)
md5, bytes_, adler32 = session.query(models.RSEFileAssociation.md5, models.RSEFileAssociation.bytes, models.RSEFileAssociation.adler32).filter(models.RSEFileAssociation.scope == lock.scope,
models.RSEFileAssociation.name == lock.name,
models.RSEFileAssociation.rse_id == lock.rse_id).one()
session.flush()
request_core.queue_requests(requests=[create_transfer_dict(dest_rse_id=lock.rse_id,
request_type=RequestType.TRANSFER,
scope=lock.scope, name=lock.name, rule=rule, lock=lock, bytes_=bytes_, md5=md5, adler32=adler32,
ds_scope=rule.scope, ds_name=rule.name, lifetime=None, activity=rule.activity, session=session)], session=session)
elif key == 'account':
# Check if the account exists
get_account(options['account'], session=session)
# Update locks
locks = session.query(models.ReplicaLock).filter_by(rule_id=rule.id).all()
counter_rses = {}
for lock in locks:
if lock.rse_id in counter_rses:
counter_rses[lock.rse_id].append(lock.bytes)
else:
counter_rses[lock.rse_id] = [lock.bytes]
session.query(models.ReplicaLock).filter_by(rule_id=rule.id).update({'account': options['account']})
session.query(models.DatasetLock).filter_by(rule_id=rule.id).update({'account': options['account']})
# Update counters
for rse_id in counter_rses:
account_counter.decrease(rse_id=rse_id, account=rule.account, files=len(counter_rses[rse_id]), bytes_=sum(counter_rses[rse_id]), session=session)
account_counter.increase(rse_id=rse_id, account=options['account'], files=len(counter_rses[rse_id]), bytes_=sum(counter_rses[rse_id]), session=session)
# Update rule
rule.account = options['account']
session.flush()
elif key == 'state':
if options.get('cancel_requests', False):
rule_ids_to_stuck = set()
for lock in session.query(models.ReplicaLock).filter_by(rule_id=rule.id, state=LockState.REPLICATING).all():
# Set locks to stuck:
for lock2 in session.query(models.ReplicaLock).filter_by(scope=lock.scope, name=lock.name, rse_id=lock.rse_id, state=LockState.REPLICATING).all():
lock2.state = LockState.STUCK
rule_ids_to_stuck.add(lock2.rule_id)
transfers_to_cancel = request_core.cancel_request_did(scope=lock.scope, name=lock.name, dest_rse_id=lock.rse_id, session=session)
transfer_core.cancel_transfers(transfers_to_cancel)
replica = session.query(models.RSEFileAssociation).filter(
models.RSEFileAssociation.scope == lock.scope,
models.RSEFileAssociation.name == lock.name,
models.RSEFileAssociation.rse_id == lock.rse_id).one()
replica.state = ReplicaState.UNAVAILABLE
# Set rules and DATASETLOCKS to STUCK:
for rid in rule_ids_to_stuck:
session.query(models.ReplicationRule).filter(models.ReplicationRule.id == rid,
models.ReplicationRule.state != RuleState.SUSPENDED).update({'state': RuleState.STUCK})
session.query(models.DatasetLock).filter_by(rule_id=rid).update({'state': LockState.STUCK})
if options['state'].lower() == 'suspended':
rule.state = RuleState.SUSPENDED
elif options['state'].lower() == 'stuck':
rule.state = RuleState.STUCK
rule.stuck_at = datetime.utcnow()
if not options.get('cancel_requests', False):
session.query(models.ReplicaLock).filter_by(rule_id=rule.id, state=LockState.REPLICATING).update({'state': LockState.STUCK})
session.query(models.DatasetLock).filter_by(rule_id=rule.id).update({'state': LockState.STUCK})
elif key == 'cancel_requests':
pass
elif key == 'priority':
try:
rule.priority = options[key]
transfers_to_update = request_core.update_requests_priority(priority=options[key], filter_={'rule_id': rule_id}, session=session)
transfer_core.update_transfer_priority(transfers_to_update)
except Exception:
raise UnsupportedOperation('The FTS Requests are already in a final state.')
elif key == 'child_rule_id':
# Check if the child rule has the same scope/name as the parent rule
child_rule = session.query(models.ReplicationRule).filter_by(id=options[key]).one()
if rule.scope != child_rule.scope or rule.name != child_rule.name:
raise InputValidationError('Parent and child rule must be set on the same dataset.')
if child_rule.state != RuleState.OK:
rule.child_rule_id = options[key]
elif key == 'meta':
# Need to json.dump the metadata
rule.meta = json.dumps(options[key])
else:
setattr(rule, key, options[key])
insert_rule_history(rule=rule, recent=True, longterm=False, session=session)
        # `boost_rule` should run after `stuck`, so let's not include it in the loop since the options are unordered
if 'boost_rule' in options:
for lock in session.query(models.ReplicaLock).filter_by(rule_id=rule.id, state=LockState.STUCK).all():
lock['updated_at'] -= timedelta(days=1)
rule['updated_at'] -= timedelta(days=1)
insert_rule_history(rule, recent=True, longterm=False, session=session)
except IntegrityError as error:
if match('.*ORA-00001.*', str(error.args[0])) \
or match('.*IntegrityError.*UNIQUE constraint failed.*', str(error.args[0])) \
or match('.*1062.*Duplicate entry.*for key.*', str(error.args[0])) \
or match('.*IntegrityError.*columns? .*not unique.*', str(error.args[0])):
raise DuplicateRule(error.args[0])
else:
raise error
except NoResultFound:
        raise RuleNotFound('No rule with the id %s found' % rule_id)
    def digest_doc_for_signing(self, pdf_out: BasePdfFileWriter,
                               existing_fields_only=False,
                               bytes_reserved=None,
*, appearance_text_params=None,
in_place=False, output=None,
chunk_size=misc.DEFAULT_CHUNK_SIZE)\
-> Tuple[PreparedByteRangeDigest, 'PdfTBSDocument', IO]:
"""
.. deprecated:: 0.9.0
Use :meth:`async_digest_doc_for_signing` instead.
Set up all stages of the signing process up to and including the point
where the signature placeholder is allocated, and the document's
``/ByteRange`` digest is computed.
See :meth:`sign_pdf` for a less granular, more high-level approach.
.. note::
This method is useful in remote signing scenarios, where you might
want to free up resources while waiting for the remote signer to
respond. The :class:`.PreparedByteRangeDigest` object returned
allows you to keep track of the required state to fill the
signature container at some later point in time.
:param pdf_out:
A PDF file writer (usually an :class:`.IncrementalPdfFileWriter`)
containing the data to sign.
:param existing_fields_only:
If ``True``, never create a new empty signature field to contain
the signature.
If ``False``, a new field may be created if no field matching
:attr:`~.PdfSignatureMetadata.field_name` exists.
:param bytes_reserved:
Bytes to reserve for the CMS object in the PDF file.
If not specified, make an estimate based on a dummy signature.
.. warning::
Since the CMS object is written to the output file as a
hexadecimal string, you should request **twice** the (estimated)
number of bytes in the DER-encoded version of the CMS object.
:param appearance_text_params:
Dictionary with text parameters that will be passed to the
signature appearance constructor (if applicable).
:param output:
Write the output to the specified output stream.
If ``None``, write to a new :class:`.BytesIO` object.
Default is ``None``.
:param in_place:
Sign the original input stream in-place.
This parameter overrides ``output``.
:param chunk_size:
Size of the internal buffer (in bytes) used to feed data to the
message digest function if the input stream does not support
``memoryview``.
:return:
A tuple containing a :class:`.PreparedByteRangeDigest` object,
a :class:`.PdfTBSDocument` object and an output handle to which the
document in its current state has been written.
"""
warnings.warn(
"'digest_doc_for_signing' is deprecated, use "
"'async_digest_doc_for_signing' instead",
DeprecationWarning
)
result = asyncio.run(
self.async_digest_doc_for_signing(
pdf_out, existing_fields_only=existing_fields_only,
bytes_reserved=bytes_reserved,
appearance_text_params=appearance_text_params,
in_place=in_place, output=output, chunk_size=chunk_size
)
)
return result
async def async_digest_doc_for_signing(self, pdf_out: BasePdfFileWriter,
existing_fields_only=False,
bytes_reserved=None,
*, appearance_text_params=None,
in_place=False, output=None,
chunk_size=misc.DEFAULT_CHUNK_SIZE) \
-> Tuple[PreparedByteRangeDigest, 'PdfTBSDocument', IO]:
"""
.. versionadded:: 0.9.0
Set up all stages of the signing process up to and including the point
where the signature placeholder is allocated, and the document's
``/ByteRange`` digest is computed.
See :meth:`sign_pdf` for a less granular, more high-level approach.
.. note::
This method is useful in remote signing scenarios, where you might
want to free up resources while waiting for the remote signer to
respond. The :class:`.PreparedByteRangeDigest` object returned
allows you to keep track of the required state to fill the
signature container at some later point in time.
:param pdf_out:
A PDF file writer (usually an :class:`.IncrementalPdfFileWriter`)
containing the data to sign.
:param existing_fields_only:
If ``True``, never create a new empty signature field to contain
the signature.
If ``False``, a new field may be created if no field matching
:attr:`~.PdfSignatureMetadata.field_name` exists.
:param bytes_reserved:
Bytes to reserve for the CMS object in the PDF file.
If not specified, make an estimate based on a dummy signature.
.. warning::
Since the CMS object is written to the output file as a
hexadecimal string, you should request **twice** the (estimated)
number of bytes in the DER-encoded version of the CMS object.
:param appearance_text_params:
Dictionary with text parameters that will be passed to the
signature appearance constructor (if applicable).
:param output:
Write the output to the specified output stream.
If ``None``, write to a new :class:`.BytesIO` object.
Default is ``None``.
:param in_place:
Sign the original input stream in-place.
This parameter overrides ``output``.
:param chunk_size:
Size of the internal buffer (in bytes) used to feed data to the
message digest function if the input stream does not support
``memoryview``.
:return:
A tuple containing a :class:`.PreparedByteRangeDigest` object,
a :class:`.PdfTBSDocument` object and an output handle to which the
document in its current state has been written.
"""
signing_session = self.init_signing_session(
pdf_out, existing_fields_only=existing_fields_only,
)
validation_info \
= await signing_session.perform_presign_validation(pdf_out)
if bytes_reserved is None:
estimation = signing_session.estimate_signature_container_size(
validation_info=validation_info,
tight=self.signature_meta.tight_size_estimates
)
bytes_reserved = await estimation
tbs_document = signing_session.prepare_tbs_document(
validation_info=validation_info,
bytes_reserved=bytes_reserved,
appearance_text_params=appearance_text_params
)
prepared_br_digest, res_output = tbs_document.digest_tbs_document(
in_place=in_place, chunk_size=chunk_size, output=output
)
return (
prepared_br_digest, tbs_document,
misc.finalise_output(output, res_output)
)
def sign_pdf(self, pdf_out: BasePdfFileWriter,
existing_fields_only=False, bytes_reserved=None, *,
appearance_text_params=None, in_place=False,
output=None, chunk_size=misc.DEFAULT_CHUNK_SIZE):
"""
.. versionchanged:: 0.9.0
Wrapper around :meth:`async_sign_pdf`.
Sign a PDF file using the provided output writer.
:param pdf_out:
A PDF file writer (usually an :class:`.IncrementalPdfFileWriter`)
containing the data to sign.
:param existing_fields_only:
If ``True``, never create a new empty signature field to contain
the signature.
If ``False``, a new field may be created if no field matching
:attr:`~.PdfSignatureMetadata.field_name` exists.
:param bytes_reserved:
Bytes to reserve for the CMS object in the PDF file.
If not specified, make an estimate based on a dummy signature.
:param appearance_text_params:
Dictionary with text parameters that will be passed to the
signature appearance constructor (if applicable).
:param output:
Write the output to the specified output stream.
If ``None``, write to a new :class:`.BytesIO` object.
Default is ``None``.
:param in_place:
Sign the original input stream in-place.
This parameter overrides ``output``.
:param chunk_size:
Size of the internal buffer (in bytes) used to feed data to the
message digest function if the input stream does not support
``memoryview``.
:return:
The output stream containing the signed data.
"""
result = asyncio.run(
self.async_sign_pdf(
pdf_out, existing_fields_only=existing_fields_only,
bytes_reserved=bytes_reserved,
appearance_text_params=appearance_text_params,
in_place=in_place, output=output, chunk_size=chunk_size
)
)
return result
async def async_sign_pdf(self, pdf_out: BasePdfFileWriter,
existing_fields_only=False, bytes_reserved=None, *,
appearance_text_params=None, in_place=False,
output=None, chunk_size=misc.DEFAULT_CHUNK_SIZE):
"""
.. versionadded:: 0.9.0
Sign a PDF file using the provided output writer.
:param pdf_out:
A PDF file writer (usually an :class:`.IncrementalPdfFileWriter`)
containing the data to sign.
:param existing_fields_only:
If ``True``, never create a new empty signature field to contain
the signature.
If ``False``, a new field may be created if no field matching
:attr:`~.PdfSignatureMetadata.field_name` exists.
:param bytes_reserved:
Bytes to reserve for the CMS object in the PDF file.
If not specified, make an estimate based on a dummy signature.
:param appearance_text_params:
Dictionary with text parameters that will be passed to the
signature appearance constructor (if applicable).
:param output:
Write the output to the specified output stream.
If ``None``, write to a new :class:`.BytesIO` object.
Default is ``None``.
:param in_place:
Sign the original input stream in-place.
This parameter overrides ``output``.
:param chunk_size:
Size of the internal buffer (in bytes) used to feed data to the
message digest function if the input stream does not support
``memoryview``.
:return:
The output stream containing the signed data.
"""
signing_session = self.init_signing_session(
pdf_out, existing_fields_only=existing_fields_only,
)
validation_info = \
await signing_session.perform_presign_validation(pdf_out)
if bytes_reserved is None:
estimation = signing_session.estimate_signature_container_size(
validation_info, tight=self.signature_meta.tight_size_estimates
)
bytes_reserved = await estimation
tbs_document = signing_session.prepare_tbs_document(
validation_info=validation_info,
bytes_reserved=bytes_reserved,
appearance_text_params=appearance_text_params
)
prepared_br_digest, res_output = tbs_document.digest_tbs_document(
in_place=in_place, chunk_size=chunk_size, output=output
)
post_signing_doc = await tbs_document.perform_signature(
document_digest=prepared_br_digest.document_digest,
pdf_cms_signed_attrs=PdfCMSSignedAttributes(
signing_time=signing_session.system_time,
adobe_revinfo_attr=(
None if validation_info is None else
validation_info.adobe_revinfo_attr
),
cades_signed_attrs=self.signature_meta.cades_signed_attr_spec
)
)
await post_signing_doc.post_signature_processing(
res_output, chunk_size=chunk_size
)
# we put the finalisation step after the DSS manipulations, since
# otherwise we'd also run into issues with non-seekable output buffers
return misc.finalise_output(output, res_output)
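
# Usage sketch (illustrative, not part of the original module): assuming the class
# above is pyHanko's `PdfSigner` from `pyhanko.sign.signers`, this is roughly how the
# synchronous `sign_pdf` wrapper is driven. The key/certificate file names and the
# signature field name are placeholders; loader arguments can differ slightly between
# pyHanko versions.
def _example_sign_pdf_sketch():
    from pyhanko.pdf_utils.incremental_writer import IncrementalPdfFileWriter
    from pyhanko.sign import signers

    # Load signing material and describe the signature field to use or create.
    simple_signer = signers.SimpleSigner.load('signer_key.pem', 'signer_cert.pem')
    meta = signers.PdfSignatureMetadata(field_name='Signature1')
    pdf_signer = signers.PdfSigner(meta, signer=simple_signer)

    with open('input.pdf', 'rb') as inf:
        w = IncrementalPdfFileWriter(inf)
        with open('signed.pdf', 'wb') as outf:
            # For remote signing, async_digest_doc_for_signing() could be used
            # instead: it returns a PreparedByteRangeDigest so the CMS container
            # can be filled in once the remote signer responds.
            pdf_signer.sign_pdf(w, output=outf)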
@dataclass(frozen=True)
class PreSignValidationStatus:
"""
.. versionadded:: 0.7.0
Container for validation data collected prior to creating a signature, e.g.
for later inclusion in a document's DSS, or as a signed attribute on
the signature.
"""
signer_path: ValidationPath
"""
Validation path for the signer's certificate.
"""
validation_paths: List[ValidationPath]
"""
List of other relevant validation paths.
"""
ts_validation_paths: Optional[List[ValidationPath]] = None
"""
List of validation paths relevant for embedded timestamps.
"""
adobe_revinfo_attr: Optional[asn1_pdf.RevocationInfoArchival] = None
"""
Preformatted revocation info attribute to include, if requested by the
settings.
"""
ocsps_to_embed: List[ocsp.OCSPResponse] = None
"""
List of OCSP responses collected so far.
"""
crls_to_embed: List[crl.CertificateList] = None
"""
List of CRLS collected so far.
"""
ac_validation_paths: Optional[List[ValidationPath]] = None
"""
List of validation paths relevant for embedded attribute certificates.
"""
class PdfSigningSession:
"""
.. versionadded:: 0.7.0
    Class modelling a PDF signing session.
import os
import re
import sys
import cv2
import math
import time
import scipy
import argparse
import matplotlib
import numpy as np
import pylab as plt
from joblib import Parallel, delayed
import util
import torch
import torch as T
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from collections import OrderedDict
from config_reader import config_reader
from scipy.ndimage.filters import gaussian_filter
import glob
import os
from stat import *
from scipy.spatial.distance import cdist
from sklearn.cluster import KMeans
import json
#parser = argparse.ArgumentParser()
#parser.add_argument('--t7_file', required=True)
#parser.add_argument('--pth_file', required=True)
#args = parser.parse_args()
torch.set_num_threads(torch.get_num_threads())
weight_name = './model/pose_model.pth'
blocks = {}
# find connection in the specified sequence, center 29 is in the position 15
limbSeq = [[2,3], [2,6], [3,4], [4,5], [6,7], [7,8], [2,9], [9,10], \
[10,11], [2,12], [12,13], [13,14], [2,1], [1,15], [15,17], \
[1,16], [16,18], [3,17], [6,18]]
# the middle joints heatmap correspondence
mapIdx = [[31,32], [39,40], [33,34], [35,36], [41,42], [43,44], [19,20], [21,22], \
[23,24], [25,26], [27,28], [29,30], [47,48], [49,50], [53,54], [51,52], \
[55,56], [37,38], [45,46]]
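# Note on the numbering (assumed from the `x - 19` offset used further below): each
# pair in mapIdx names the x/y channels of the part-affinity field for the limb at
# the same index in limbSeq, using the combined channel numbering of the original
# model (heatmaps 0-18, PAFs 19-56); subtracting 19 yields an index into the
# 38-channel PAF output.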
# visualize
colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
[0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
[170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
block0 = [{'conv1_1':[3,64,3,1,1]},{'conv1_2':[64,64,3,1,1]},{'pool1_stage1':[2,2,0]},{'conv2_1':[64,128,3,1,1]},{'conv2_2':[128,128,3,1,1]},{'pool2_stage1':[2,2,0]},{'conv3_1':[128,256,3,1,1]},{'conv3_2':[256,256,3,1,1]},{'conv3_3':[256,256,3,1,1]},{'conv3_4':[256,256,3,1,1]},{'pool3_stage1':[2,2,0]},{'conv4_1':[256,512,3,1,1]},{'conv4_2':[512,512,3,1,1]},{'conv4_3_CPM':[512,256,3,1,1]},{'conv4_4_CPM':[256,128,3,1,1]}]
blocks['block1_1'] = [{'conv5_1_CPM_L1':[128,128,3,1,1]},{'conv5_2_CPM_L1':[128,128,3,1,1]},{'conv5_3_CPM_L1':[128,128,3,1,1]},{'conv5_4_CPM_L1':[128,512,1,1,0]},{'conv5_5_CPM_L1':[512,38,1,1,0]}]
blocks['block1_2'] = [{'conv5_1_CPM_L2':[128,128,3,1,1]},{'conv5_2_CPM_L2':[128,128,3,1,1]},{'conv5_3_CPM_L2':[128,128,3,1,1]},{'conv5_4_CPM_L2':[128,512,1,1,0]},{'conv5_5_CPM_L2':[512,19,1,1,0]}]
for i in range(2,7):
blocks['block%d_1'%i] = [{'Mconv1_stage%d_L1'%i:[185,128,7,1,3]},{'Mconv2_stage%d_L1'%i:[128,128,7,1,3]},{'Mconv3_stage%d_L1'%i:[128,128,7,1,3]},{'Mconv4_stage%d_L1'%i:[128,128,7,1,3]},
{'Mconv5_stage%d_L1'%i:[128,128,7,1,3]},{'Mconv6_stage%d_L1'%i:[128,128,1,1,0]},{'Mconv7_stage%d_L1'%i:[128,38,1,1,0]}]
blocks['block%d_2'%i] = [{'Mconv1_stage%d_L2'%i:[185,128,7,1,3]},{'Mconv2_stage%d_L2'%i:[128,128,7,1,3]},{'Mconv3_stage%d_L2'%i:[128,128,7,1,3]},{'Mconv4_stage%d_L2'%i:[128,128,7,1,3]},
{'Mconv5_stage%d_L2'%i:[128,128,7,1,3]},{'Mconv6_stage%d_L2'%i:[128,128,1,1,0]},{'Mconv7_stage%d_L2'%i:[128,19,1,1,0]}]
def make_layers(cfg_dict):
layers = []
for i in range(len(cfg_dict)-1):
one_ = cfg_dict[i]
        for k,v in one_.items():
if 'pool' in k:
layers += [nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2] )]
else:
conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], kernel_size=v[2], stride = v[3], padding=v[4])
layers += [conv2d, nn.ReLU(inplace=True)]
    one_ = list(cfg_dict[-1].keys())
k = one_[0]
v = cfg_dict[-1][k]
conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], kernel_size=v[2], stride = v[3], padding=v[4])
layers += [conv2d]
return nn.Sequential(*layers)
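
# Illustrative sketch (not part of the original script) of the cfg format that
# make_layers() consumes: conv entries are [in_ch, out_ch, kernel, stride, pad],
# pool entries are [kernel, stride, pad], and the final entry is emitted without a
# trailing ReLU.
def _example_make_layers():
    example_cfg = [{'conv_a': [3, 8, 3, 1, 1]},
                   {'pool_a': [2, 2, 0]},
                   {'conv_b': [8, 16, 1, 1, 0]}]
    # -> Sequential(Conv2d(3, 8, 3, padding=1), ReLU, MaxPool2d(2, 2), Conv2d(8, 16, 1))
    return make_layers(example_cfg)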
layers = []
for i in range(len(block0)):
one_ = block0[i]
    for k,v in one_.items():
if 'pool' in k:
layers += [nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2] )]
else:
conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], kernel_size=v[2], stride = v[3], padding=v[4])
layers += [conv2d, nn.ReLU(inplace=True)]
models = {}
models['block0']=nn.Sequential(*layers)
for k,v in blocks.items():
models[k] = make_layers(v)
class pose_model(nn.Module):
def __init__(self,model_dict,transform_input=False):
super(pose_model, self).__init__()
self.model0 = model_dict['block0']
self.model1_1 = model_dict['block1_1']
self.model2_1 = model_dict['block2_1']
self.model3_1 = model_dict['block3_1']
self.model4_1 = model_dict['block4_1']
self.model5_1 = model_dict['block5_1']
self.model6_1 = model_dict['block6_1']
self.model1_2 = model_dict['block1_2']
self.model2_2 = model_dict['block2_2']
self.model3_2 = model_dict['block3_2']
self.model4_2 = model_dict['block4_2']
self.model5_2 = model_dict['block5_2']
self.model6_2 = model_dict['block6_2']
def forward(self, x):
out1 = self.model0(x)
out1_1 = self.model1_1(out1)
out1_2 = self.model1_2(out1)
out2 = torch.cat([out1_1,out1_2,out1],1)
out2_1 = self.model2_1(out2)
out2_2 = self.model2_2(out2)
out3 = torch.cat([out2_1,out2_2,out1],1)
out3_1 = self.model3_1(out3)
out3_2 = self.model3_2(out3)
out4 = torch.cat([out3_1,out3_2,out1],1)
out4_1 = self.model4_1(out4)
out4_2 = self.model4_2(out4)
out5 = torch.cat([out4_1,out4_2,out1],1)
out5_1 = self.model5_1(out5)
out5_2 = self.model5_2(out5)
out6 = torch.cat([out5_1,out5_2,out1],1)
out6_1 = self.model6_1(out6)
out6_2 = self.model6_2(out6)
return out6_1,out6_2
model = pose_model(models)
model.load_state_dict(torch.load(weight_name))
model.cuda()
model.float()
model.eval()
param_, model_ = config_reader()
def handle_one(oriImg, bounding_box):
#oriImg = cv2.imread(oriImgpath)
# for visualize
canvas = np.copy(oriImg)
canvas_white = np.ones(oriImg.shape)
canvas_white[:] = (255, 255, 255)
imageToTest = Variable(T.transpose(T.transpose(T.unsqueeze(torch.from_numpy(oriImg).float(),0),2,3),1,2),volatile=True).cuda()
#print oriImg.shape
scale = model_['boxsize'] / float(oriImg.shape[0])
#print scale
h = int(oriImg.shape[0]*scale)
w = int(oriImg.shape[1]*scale)
pad_h = 0 if (h%model_['stride']==0) else model_['stride'] - (h % model_['stride'])
pad_w = 0 if (w%model_['stride']==0) else model_['stride'] - (w % model_['stride'])
new_h = h+pad_h
new_w = w+pad_w
imageToTest = cv2.resize(oriImg, (0,0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
imageToTest_padded, pad = util.padRightDownCorner(imageToTest, model_['stride'], model_['padValue'])
imageToTest_padded = np.transpose(np.float32(imageToTest_padded[:,:,:,np.newaxis]), (3,2,0,1))/256 - 0.5
feed = Variable(T.from_numpy(imageToTest_padded)).cuda()
output1,output2 = model(feed)
heatmap = nn.UpsamplingBilinear2d((oriImg.shape[0], oriImg.shape[1])).cuda()(output2)
paf = nn.UpsamplingBilinear2d((oriImg.shape[0], oriImg.shape[1])).cuda()(output1)
#print heatmap.size()
#print paf.size()
#print type(heatmap)
heatmap_avg = T.transpose(T.transpose(heatmap[0],0,1),1,2).data.cpu().numpy()
paf_avg = T.transpose(T.transpose(paf[0],0,1),1,2).data.cpu().numpy()
all_peaks = []
peak_counter = 0
#maps =
for part in range(18):
map_ori = heatmap_avg[:,:,part]
map = gaussian_filter(map_ori, sigma=3)
map_left = np.zeros(map.shape)
map_left[1:,:] = map[:-1,:]
map_right = np.zeros(map.shape)
map_right[:-1,:] = map[1:,:]
map_up = np.zeros(map.shape)
map_up[:,1:] = map[:,:-1]
map_down = np.zeros(map.shape)
map_down[:,:-1] = map[:,1:]
peaks_binary = np.logical_and.reduce((map>=map_left, map>=map_right, map>=map_up, map>=map_down, map > param_['thre1']))
# peaks_binary = T.eq(
# peaks = zip(T.nonzero(peaks_binary)[0],T.nonzero(peaks_binary)[0])
        peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0]))  # note reverse
peaks_with_score = [x + (map_ori[x[1],x[0]],) for x in peaks]
id = range(peak_counter, peak_counter + len(peaks))
peaks_with_score_and_id = [peaks_with_score[i] + (id[i],) for i in range(len(id))]
all_peaks.append(peaks_with_score_and_id)
peak_counter += len(peaks)
connection_all = []
special_k = []
mid_num = 10
for k in range(len(mapIdx)):
score_mid = paf_avg[:,:,[x-19 for x in mapIdx[k]]]
candA = all_peaks[limbSeq[k][0]-1]
candB = all_peaks[limbSeq[k][1]-1]
nA = len(candA)
nB = len(candB)
indexA, indexB = limbSeq[k]
if(nA != 0 and nB != 0):
connection_candidate = []
for i in range(nA):
for j in range(nB):
vec = np.subtract(candB[j][:2], candA[i][:2])
norm = math.sqrt(vec[0]*vec[0] + vec[1]*vec[1]) + 1e-8 # changed
vec = np.divide(vec, norm)
                    startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num),
                                        np.linspace(candA[i][1], candB[j][1], num=mid_num)))
vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] \
for I in range(len(startend))])
vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] \
for I in range(len(startend))])
score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
score_with_dist_prior = sum(score_midpts)/len(score_midpts) + min(0.5*oriImg.shape[0]/norm-1, 0)
criterion1 = len(np.nonzero(score_midpts > param_['thre2'])[0]) > 0.8 * len(score_midpts)
criterion2 = score_with_dist_prior > 0
if criterion1 and criterion2:
connection_candidate.append([i, j, score_with_dist_prior, score_with_dist_prior+candA[i][2]+candB[j][2]])
connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)
connection = np.zeros((0,5))
for c in range(len(connection_candidate)):
i,j,s = connection_candidate[c][0:3]
if(i not in connection[:,3] and j not in connection[:,4]):
connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]])
if(len(connection) >= min(nA, nB)):
break
connection_all.append(connection)
else:
special_k.append(k)
connection_all.append([])
# last number in each row is the total parts number of that person
# the second last number in each row is the score of the overall configuration
subset = -1 * np.ones((0, 20))
candidate = np.array([item for sublist in all_peaks for item in sublist])
for k in range(len(mapIdx)):
if k not in special_k:
partAs = connection_all[k][:,0]
partBs = connection_all[k][:,1]
indexA, indexB = np.array(limbSeq[k]) - 1
for i in range(len(connection_all[k])): #= 1:size(temp,1)
found = 0
subset_idx = [-1, -1]
for j in range(len(subset)): #1:size(subset,1):
if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:
subset_idx[found] = j
found += 1
if found == 1:
j = subset_idx[0]
if(subset[j][indexB] != partBs[i]):
subset[j][indexB] = partBs[i]
subset[j][-1] += 1
subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
elif found == 2: # if found 2 and disjoint, merge them
j1, j2 = subset_idx
#print "found = 2"
membership = ((subset[j1]>=0).astype(int) + (subset[j2]>=0).astype(int))[:-2]
if len(np.nonzero(membership == 2)[0]) == 0: #merge
subset[j1][:-2] += (subset[j2][:-2] + 1)
subset[j1][-2:] += subset[j2][-2:]
subset[j1][-2] += connection_all[k][i][2]
subset = np.delete(subset, j2, 0)
else: # as like found == 1
subset[j1][indexB] = partBs[i]
subset[j1][-1] += 1
subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
# if find no partA in the subset, create a new subset
elif not found and k < 17:
row = -1 * np.ones(20)
row[indexA] = partAs[i]
row[indexB] = partBs[i]
row[-1] = 2
row[-2] = sum(candidate[connection_all[k][i,:2].astype(int), 2]) + connection_all[k][i][2]
subset = np.vstack([subset, row])
    # delete rows of subset that have too few parts or a low average score
    deleteIdx = []
for i in range(len(subset)):
if subset[i][-1] < 4 or subset[i][-2]/subset[i][-1] < 0.4:
deleteIdx.append(i)
subset = np.delete(subset, deleteIdx, axis=0)
# canvas = cv2.imread(test_image) # B,G,R order
# for i in range(18):
# for j in range(len(all_peaks[i])):
# cv2.circle(canvas, all_peaks[i][j][0:2], 4, colors[i], thickness=-1)
# cv2.circle(canvas_white, all_peaks[i][j][0:2], 4, colors[i], thickness=-1)
# separate joints per skeleton
joints_per_skeleton = [[] for i in range(len(subset))]
for i in range(18):
#print(i)
for n in range(len(subset)):
#print n
index = int(subset[n][i])
#print index
if -1 == index:
joints_per_skeleton[n].append(None)
continue
Y = candidate[index, 0]
X = candidate[index, 1]
#print(Y,X)
joints_per_skeleton[n].append((Y,X))
flag = False
speaker_skeleton_index = -1
# loop through all the skeletons
for i in range(len(joints_per_skeleton)):
        # if the left eye, right eye, or nose is inside the bounding box, treat this as the speaker's skeleton
if isinsidebox(bounding_box, joints_per_skeleton[i][0]) or isinsidebox(bounding_box, joints_per_skeleton[i][15]) or isinsidebox(bounding_box, joints_per_skeleton[i][14]):
speaker_skeleton_index = i
# check if either left or right hip of speaker is visible
if joints_per_skeleton[i][8] is not None or joints_per_skeleton[i][11] is not None:
flag = True
break
else:
continue
    if speaker_skeleton_index != -1:
stickwidth = 4
for i in range(17):
for n in range(speaker_skeleton_index,speaker_skeleton_index+1):
index = subset[n][np.array(limbSeq[i])-1]
if -1 in index:
continue
cur_canvas = canvas.copy()
cur_canvas_white = canvas_white.copy()
Y = candidate[index.astype(int), 0]
X = candidate[index.astype(int), 1]
mX = np.mean(X)
mY = np.mean(Y)
length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
polygon = cv2.ellipse2Poly((int(mY),int(mX)), (int(length/2), stickwidth), int(angle), 0, 360, 1)
        :param ConfigMapNamespace: ConfigMap namespace
:type ConfigMapNamespace: str
"""
self.EdgeUnitID = None
self.ConfigMapName = None
self.ConfigMapNamespace = None
def _deserialize(self, params):
self.EdgeUnitID = params.get("EdgeUnitID")
self.ConfigMapName = params.get("ConfigMapName")
self.ConfigMapNamespace = params.get("ConfigMapNamespace")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeConfigMapResponse(AbstractModel):
"""DescribeConfigMap返回参数结构体
"""
def __init__(self):
r"""
        :param Name: Name
        Note: this field may return null, indicating that no valid value can be obtained.
        :type Name: str
        :param Namespace: Namespace
        Note: this field may return null, indicating that no valid value can be obtained.
        :type Namespace: str
        :param CreateTime: Creation time
        Note: this field may return null, indicating that no valid value can be obtained.
        :type CreateTime: str
        :param Yaml: YAML configuration
        Note: this field may return null, indicating that no valid value can be obtained.
        :type Yaml: str
        :param Json: JSON format of the configuration item (base64-encoded)
        Note: this field may return null, indicating that no valid value can be obtained.
        :type Json: str
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
"""
self.Name = None
self.Namespace = None
self.CreateTime = None
self.Yaml = None
self.Json = None
self.RequestId = None
def _deserialize(self, params):
self.Name = params.get("Name")
self.Namespace = params.get("Namespace")
self.CreateTime = params.get("CreateTime")
self.Yaml = params.get("Yaml")
self.Json = params.get("Json")
self.RequestId = params.get("RequestId")
class DescribeConfigMapYamlErrorRequest(AbstractModel):
"""DescribeConfigMapYamlError请求参数结构体
"""
def __init__(self):
r"""
        :param Yaml: YAML file
:type Yaml: str
"""
self.Yaml = None
def _deserialize(self, params):
self.Yaml = params.get("Yaml")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeConfigMapYamlErrorResponse(AbstractModel):
"""DescribeConfigMapYamlError返回参数结构体
"""
def __init__(self):
r"""
        :param CheckPass: Whether the validation passed
        Note: this field may return null, indicating that no valid value can be obtained.
        :type CheckPass: bool
        :param ErrType: Error type
        Note: this field may return null, indicating that no valid value can be obtained.
        :type ErrType: int
        :param ErrInfo: Error message
        Note: this field may return null, indicating that no valid value can be obtained.
        :type ErrInfo: str
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
"""
self.CheckPass = None
self.ErrType = None
self.ErrInfo = None
self.RequestId = None
def _deserialize(self, params):
self.CheckPass = params.get("CheckPass")
self.ErrType = params.get("ErrType")
self.ErrInfo = params.get("ErrInfo")
self.RequestId = params.get("RequestId")
class DescribeConfigMapsRequest(AbstractModel):
"""DescribeConfigMaps请求参数结构体
"""
def __init__(self):
r"""
        :param EdgeUnitID: Edge unit ID
        :type EdgeUnitID: int
        :param Offset: Pagination offset
        :type Offset: int
        :param Limit: Page size (maximum 100)
        :type Limit: int
        :param ConfigMapNamespace: Namespace
        :type ConfigMapNamespace: str
        :param NamePattern: Name for fuzzy matching
        :type NamePattern: str
        :param Sort: Sort.Field should be CreateTime; Sort.Order is ASC|DESC, default ASC
        :type Sort: :class:`tencentcloud.iecp.v20210914.models.FieldSort`
"""
self.EdgeUnitID = None
self.Offset = None
self.Limit = None
self.ConfigMapNamespace = None
self.NamePattern = None
self.Sort = None
def _deserialize(self, params):
self.EdgeUnitID = params.get("EdgeUnitID")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
self.ConfigMapNamespace = params.get("ConfigMapNamespace")
self.NamePattern = params.get("NamePattern")
if params.get("Sort") is not None:
self.Sort = FieldSort()
self.Sort._deserialize(params.get("Sort"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeConfigMapsResponse(AbstractModel):
"""DescribeConfigMaps返回参数结构体
"""
def __init__(self):
r"""
        :param Items: ConfigMap list
        Note: this field may return null, indicating that no valid value can be obtained.
        :type Items: list of ConfigMapBasicInfo
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
"""
self.Items = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Items") is not None:
self.Items = []
for item in params.get("Items"):
obj = ConfigMapBasicInfo()
obj._deserialize(item)
self.Items.append(obj)
self.RequestId = params.get("RequestId")
class DescribeDracoEdgeNodeInstallerRequest(AbstractModel):
"""DescribeDracoEdgeNodeInstaller请求参数结构体
"""
def __init__(self):
r"""
        :param SN: Device SN
:type SN: str
"""
self.SN = None
def _deserialize(self, params):
self.SN = params.get("SN")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeDracoEdgeNodeInstallerResponse(AbstractModel):
"""DescribeDracoEdgeNodeInstaller返回参数结构体
"""
def __init__(self):
r"""
        :param OnlineInstallationCommand: Online installation command
        Note: this field may return null, indicating that no valid value can be obtained.
        :type OnlineInstallationCommand: str
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
"""
self.OnlineInstallationCommand = None
self.RequestId = None
def _deserialize(self, params):
self.OnlineInstallationCommand = params.get("OnlineInstallationCommand")
self.RequestId = params.get("RequestId")
class DescribeEdgeAgentNodeInstallerRequest(AbstractModel):
"""DescribeEdgeAgentNodeInstaller请求参数结构体
"""
def __init__(self):
r"""
        :param EdgeUnitId: IECP edge unit ID
        :type EdgeUnitId: int
        :param NodeId: IECP edge node ID
        :type NodeId: int
"""
self.EdgeUnitId = None
self.NodeId = None
def _deserialize(self, params):
self.EdgeUnitId = params.get("EdgeUnitId")
self.NodeId = params.get("NodeId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeEdgeAgentNodeInstallerResponse(AbstractModel):
"""DescribeEdgeAgentNodeInstaller返回参数结构体
"""
def __init__(self):
r"""
        :param Online: Online installation information for the node
        Note: this field may return null, indicating that no valid value can be obtained.
        :type Online: :class:`tencentcloud.iecp.v20210914.models.EdgeNodeInstallerOnline`
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
"""
self.Online = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Online") is not None:
self.Online = EdgeNodeInstallerOnline()
self.Online._deserialize(params.get("Online"))
self.RequestId = params.get("RequestId")
class DescribeEdgeDefaultVpcRequest(AbstractModel):
"""DescribeEdgeDefaultVpc请求参数结构体
"""
class DescribeEdgeDefaultVpcResponse(AbstractModel):
"""DescribeEdgeDefaultVpc返回参数结构体
"""
def __init__(self):
r"""
        :param VpcId: VPC ID
        Note: this field may return null, indicating that no valid value can be obtained.
        :type VpcId: str
        :param VpcCidrBlock: VPC CIDR block
        Note: this field may return null, indicating that no valid value can be obtained.
        :type VpcCidrBlock: str
        :param SubnetId: Subnet ID
        Note: this field may return null, indicating that no valid value can be obtained.
        :type SubnetId: str
        :param SubnetCidrBlock: Subnet CIDR block
        Note: this field may return null, indicating that no valid value can be obtained.
        :type SubnetCidrBlock: str
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
"""
self.VpcId = None
self.VpcCidrBlock = None
self.SubnetId = None
self.SubnetCidrBlock = None
self.RequestId = None
def _deserialize(self, params):
self.VpcId = params.get("VpcId")
self.VpcCidrBlock = params.get("VpcCidrBlock")
self.SubnetId = params.get("SubnetId")
self.SubnetCidrBlock = params.get("SubnetCidrBlock")
self.RequestId = params.get("RequestId")
class DescribeEdgeNodePodContainersRequest(AbstractModel):
"""DescribeEdgeNodePodContainers请求参数结构体
"""
def __init__(self):
r"""
        :param EdgeUnitId: IECP edge unit ID
        :type EdgeUnitId: int
        :param NodeId: Node ID
        :type NodeId: int
        :param PodName: Pod name
        :type PodName: str
        :param Namespace: Namespace
        :type Namespace: str
"""
self.EdgeUnitId = None
self.NodeId = None
self.PodName = None
self.Namespace = None
def _deserialize(self, params):
self.EdgeUnitId = params.get("EdgeUnitId")
self.NodeId = params.get("NodeId")
self.PodName = params.get("PodName")
self.Namespace = params.get("Namespace")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeEdgeNodePodContainersResponse(AbstractModel):
"""DescribeEdgeNodePodContainers返回参数结构体
"""
def __init__(self):
r"""
        :param ContainerSet: List of Pod containers
        Note: this field may return null, indicating that no valid value can be obtained.
        :type ContainerSet: list of EdgeNodePodContainerInfo
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
"""
self.ContainerSet = None
self.RequestId = None
def _deserialize(self, params):
if params.get("ContainerSet") is not None:
self.ContainerSet = []
for item in params.get("ContainerSet"):
obj = EdgeNodePodContainerInfo()
obj._deserialize(item)
self.ContainerSet.append(obj)
self.RequestId = params.get("RequestId")
class DescribeEdgeNodePodsRequest(AbstractModel):
"""DescribeEdgeNodePods请求参数结构体
"""
def __init__(self):
r"""
        :param EdgeUnitId: IECP edge unit ID
        :type EdgeUnitId: int
        :param NodeId: Node ID
        :type NodeId: int
        :param Namespace: Namespace
        :type Namespace: str
        :param PodNamePattern: Pod name filter string
        :type PodNamePattern: str
"""
self.EdgeUnitId = None
self.NodeId = None
self.Namespace = None
self.PodNamePattern = None
def _deserialize(self, params):
self.EdgeUnitId = params.get("EdgeUnitId")
self.NodeId = params.get("NodeId")
self.Namespace = params.get("Namespace")
self.PodNamePattern = params.get("PodNamePattern")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeEdgeNodePodsResponse(AbstractModel):
"""DescribeEdgeNodePods返回参数结构体
"""
def __init__(self):
r"""
        :param PodSet: Pod list
        Note: this field may return null, indicating that no valid value can be obtained.
        :type PodSet: list of EdgeNodePodInfo
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
"""
self.PodSet = None
self.RequestId = None
def _deserialize(self, params):
if params.get("PodSet") is not None:
self.PodSet = []
for item in params.get("PodSet"):
obj = EdgeNodePodInfo()
obj._deserialize(item)
self.PodSet.append(obj)
self.RequestId = params.get("RequestId")
class DescribeEdgeNodeRemarkListRequest(AbstractModel):
"""DescribeEdgeNodeRemarkList请求参数结构体
"""
def __init__(self):
r"""
        :param EdgeUnitId: Edge unit ID
:type EdgeUnitId: int
"""
self.EdgeUnitId = None
def _deserialize(self, params):
self.EdgeUnitId = params.get("EdgeUnitId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeEdgeNodeRemarkListResponse(AbstractModel):
"""DescribeEdgeNodeRemarkList返回参数结构体
"""
def __init__(self):
r"""
        :param Remarks: List of remarks within the edge unit
        Note: this field may return null, indicating that no valid value can be obtained.
        :type Remarks: list of str
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
"""
self.Remarks = None
self.RequestId = None
def _deserialize(self, params):
self.Remarks = params.get("Remarks")
self.RequestId = params.get("RequestId")
class DescribeEdgeNodeRequest(AbstractModel):
"""DescribeEdgeNode请求参数结构体
"""
def __init__(self):
r"""
        :param EdgeUnitId: IECP edge unit ID
        :type EdgeUnitId: int
        :param NodeId: IECP edge node ID
        :type NodeId: int
"""
self.EdgeUnitId = None
self.NodeId = None
def _deserialize(self, params):
self.EdgeUnitId = params.get("EdgeUnitId")
self.NodeId = params.get("NodeId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeEdgeNodeResponse(AbstractModel):
"""DescribeEdgeNode返回参数结构体
"""
def __init__(self):
r"""
        :param Id: Node ID
        Note: this field may return null, indicating that no valid value can be obtained.
        :type Id: int
        :param Kind: Node type
        Note: this field may return null, indicating that no valid value can be obtained.
        :type Kind: str
        :param Name: Node name
        Note: this field may return null, indicating that no valid value can be obtained.
        :type Name: str
        :param Status: Node status (1: healthy | 2: abnormal | 3: offline | 4: not activated)
        Note: this field may return null, indicating that no valid value can be obtained.
        :type Status: int
        :param CpuArchitecture: CPU architecture
        Note: this field may return null, indicating that no valid value can be obtained.
        :type CpuArchitecture: str
        :param AiChipArchitecture: AI chip architecture
        Note: this field may return null, indicating that no valid value can be obtained.
        :type AiChipArchitecture: str
        :param Ip: IP address
        Note: this field may return null, indicating that no valid value can be obtained.
        :type Ip: str
        :param Labels: List of node labels
        :type Labels: list of EdgeNodeLabel
        :param Resource: Node resource information
        Note: this field may return null, indicating that no valid value can be obtained.
        :type Resource: :class:`tencentcloud.iecp.v20210914.models.EdgeNodeResourceInfo`
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
"""
self.Id = None
self.Kind = None
self.Name = None
self.Status = None
self.CpuArchitecture = None
self.AiChipArchitecture = None
self.Ip = None
self.Labels = None
self.Resource = None
self.RequestId = None
def _deserialize(self, params):
self.Id = params.get("Id")
self.Kind = params.get("Kind")
self.Name = params.get("Name")
self.Status = params.get("Status")
self.CpuArchitecture = params.get("CpuArchitecture")
self.AiChipArchitecture = params.get("AiChipArchitecture")
self.Ip = params.get("Ip")
if params.get("Labels") is not None:
self.Labels = []
for item in params.get("Labels"):
obj = EdgeNodeLabel()
obj._deserialize(item)
self.Labels.append(obj)
if params.get("Resource") is not None:
self.Resource = EdgeNodeResourceInfo()
self.Resource._deserialize(params.get("Resource"))
self.RequestId = params.get("RequestId")
class DescribeEdgeNodesRequest(AbstractModel):
"""DescribeEdgeNodes请求参数结构体
"""
def __init__(self):
r"""
        :param EdgeUnitId: IECP edge unit ID
        :type EdgeUnitId: int
        :param NamePattern: Fuzzy search string for edge node names
        :type NamePattern: str
        :param NameMatchedList: List of edge node names; supports batch queries and takes precedence over the fuzzy query
        :type NameMatchedList: list of str
from contextlib import contextmanager
import os
from pathlib import Path
import pytest
import trio
from pons import (
abi,
ABIDecodingError,
Address,
Amount,
ContractABI,
DeployedContract,
ReadMethod,
TxHash,
BlockHash,
Block,
Either,
ContractPanic,
ContractLegacyError,
ContractError,
)
from pons._abi_types import keccak, encode_args
from pons._contract_abi import PANIC_ERROR
from pons._client import BadResponseFormat, ProviderError, TransactionFailed
from pons._entities import rpc_encode_data
from pons._provider import RPCError
from .compile import compile_file
@pytest.fixture
def compiled_contracts():
path = Path(__file__).resolve().parent / "TestClient.sol"
yield compile_file(path)
@contextmanager
def monkeypatched(obj, attr, patch):
original_value = getattr(obj, attr)
setattr(obj, attr, patch)
yield obj
setattr(obj, attr, original_value)
def normalize_topics(topics):
"""
Reduces visual noise in assertions by bringing the log topics in a log entry
(a tuple of single elements) to the format used in EventFilter
(where even single elements are 1-tuples).
"""
return tuple((elem,) for elem in topics)
async def test_net_version(test_provider, session):
net_version1 = await session.net_version()
assert net_version1 == "0"
# This is not going to get called
def wrong_net_version():
raise NotImplementedError() # pragma: no cover
# The result should have been cached the first time
with monkeypatched(test_provider, "net_version", wrong_net_version):
net_version2 = await session.net_version()
assert net_version1 == net_version2
async def test_net_version_type_check(test_provider, session):
# Provider returning a bad value
with monkeypatched(test_provider, "net_version", lambda: 0):
with pytest.raises(BadResponseFormat, match="net_version: expected a string result"):
await session.net_version()
async def test_eth_chain_id(test_provider, session):
chain_id1 = await session.eth_chain_id()
assert chain_id1 == 2299111 * 57099167
# This is not going to get called
def wrong_chain_id():
raise NotImplementedError() # pragma: no cover
# The result should have been cached the first time
with monkeypatched(test_provider, "eth_chain_id", wrong_chain_id):
chain_id2 = await session.eth_chain_id()
assert chain_id1 == chain_id2
async def test_eth_get_balance(session, root_signer, another_signer):
to_transfer = Amount.ether(10)
await session.transfer(root_signer, another_signer.address, to_transfer)
acc1_balance = await session.eth_get_balance(another_signer.address)
assert acc1_balance == to_transfer
# Non-existent address (which is technically just an unfunded address)
random_addr = Address(os.urandom(20))
balance = await session.eth_get_balance(random_addr)
assert balance == Amount.ether(0)
async def test_eth_get_transaction_receipt(test_provider, session, root_signer, another_signer):
test_provider.disable_auto_mine_transactions()
tx_hash = await session.broadcast_transfer(
root_signer, another_signer.address, Amount.ether(10)
)
receipt = await session.eth_get_transaction_receipt(tx_hash)
assert receipt is None
test_provider.enable_auto_mine_transactions()
receipt = await session.eth_get_transaction_receipt(tx_hash)
assert receipt.succeeded
# A non-existent transaction
receipt = await session.eth_get_transaction_receipt(TxHash(os.urandom(32)))
assert receipt is None
async def test_eth_get_transaction_count(session, root_signer, another_signer):
assert await session.eth_get_transaction_count(root_signer.address) == 0
await session.transfer(root_signer, another_signer.address, Amount.ether(10))
assert await session.eth_get_transaction_count(root_signer.address) == 1
async def test_wait_for_transaction_receipt(
test_provider, session, root_signer, another_signer, autojump_clock
):
to_transfer = Amount.ether(10)
test_provider.disable_auto_mine_transactions()
tx_hash = await session.broadcast_transfer(root_signer, another_signer.address, to_transfer)
# The receipt won't be available until we mine, so the waiting should time out
start_time = trio.current_time()
try:
with trio.fail_after(5):
receipt = await session.wait_for_transaction_receipt(tx_hash)
except trio.TooSlowError:
pass
end_time = trio.current_time()
assert end_time - start_time == 5
# Now let's enable mining while we wait for the receipt
receipt = None
async def get_receipt():
nonlocal receipt
receipt = await session.wait_for_transaction_receipt(tx_hash)
async def delayed_enable_mining():
await trio.sleep(5)
test_provider.enable_auto_mine_transactions()
async with trio.open_nursery() as nursery:
nursery.start_soon(get_receipt)
nursery.start_soon(delayed_enable_mining)
assert receipt.succeeded
async def test_eth_call(session, compiled_contracts, root_signer):
compiled_contract = compiled_contracts["BasicContract"]
deployed_contract = await session.deploy(root_signer, compiled_contract.constructor(123))
result = await session.eth_call(deployed_contract.read.getState(456))
assert result == (123 + 456,)
async def test_eth_call_decoding_error(session, compiled_contracts, root_signer):
"""
Tests that `eth_call()` propagates an error on mismatch of the declared output signature
and the bytestring received from the provider (as opposed to wrapping it in another exception).
"""
compiled_contract = compiled_contracts["BasicContract"]
deployed_contract = await session.deploy(root_signer, compiled_contract.constructor(123))
wrong_abi = ContractABI(
read=[
ReadMethod(
name="getState",
inputs=[abi.uint(256)],
                # the actual method in BasicContract returns only one uint256
outputs=[abi.uint(256), abi.uint(256)],
)
]
)
wrong_contract = DeployedContract(abi=wrong_abi, address=deployed_contract.address)
expected_message = (
r"Could not decode the return value with the expected signature \(uint256,uint256\): "
r"Tried to read 32 bytes. Only got 0 bytes"
)
with pytest.raises(ABIDecodingError, match=expected_message):
await session.eth_call(wrong_contract.read.getState(456))
async def test_estimate_deploy(session, compiled_contracts):
compiled_contract = compiled_contracts["BasicContract"]
gas = await session.estimate_deploy(compiled_contract.constructor(1))
assert isinstance(gas, int) and gas > 0
async def test_estimate_transfer(session, root_signer, another_signer):
gas = await session.estimate_transfer(
root_signer.address, another_signer.address, Amount.ether(10)
)
assert isinstance(gas, int) and gas > 0
with pytest.raises(
ProviderError,
match="Sender does not have enough balance to cover transaction value and gas",
):
await session.estimate_transfer(
root_signer.address, another_signer.address, Amount.ether(1000)
)
async def test_estimate_transact(session, compiled_contracts, root_signer):
compiled_contract = compiled_contracts["BasicContract"]
deployed_contract = await session.deploy(root_signer, compiled_contract.constructor(1))
gas = await session.estimate_transact(deployed_contract.write.setState(456))
assert isinstance(gas, int) and gas > 0
async def test_eth_gas_price(session):
gas_price = await session.eth_gas_price()
assert isinstance(gas_price, Amount)
async def test_eth_block_number(session, root_signer, another_signer):
await session.transfer(root_signer, another_signer.address, Amount.ether(1))
await session.transfer(root_signer, another_signer.address, Amount.ether(2))
await session.transfer(root_signer, another_signer.address, Amount.ether(3))
block_num = await session.eth_block_number()
block_info = await session.eth_get_block_by_number(block_num - 1, with_transactions=True)
assert block_info.transactions[0].value == Amount.ether(2)
async def test_transfer(session, root_signer, another_signer):
# Regular transfer
root_balance = await session.eth_get_balance(root_signer.address)
to_transfer = Amount.ether(10)
await session.transfer(root_signer, another_signer.address, to_transfer)
root_balance_after = await session.eth_get_balance(root_signer.address)
acc1_balance_after = await session.eth_get_balance(another_signer.address)
assert acc1_balance_after == to_transfer
assert root_balance - root_balance_after > to_transfer
async def test_transfer_custom_gas(session, root_signer, another_signer):
root_balance = await session.eth_get_balance(root_signer.address)
to_transfer = Amount.ether(10)
# Override gas estimate
# The standard transfer gas cost is 21000, we're being cautious here.
await session.transfer(root_signer, another_signer.address, to_transfer, gas=22000)
root_balance_after = await session.eth_get_balance(root_signer.address)
acc1_balance_after = await session.eth_get_balance(another_signer.address)
assert acc1_balance_after == to_transfer
assert root_balance - root_balance_after > to_transfer
# Not enough gas
with pytest.raises(ProviderError, match="Insufficient gas"):
await session.transfer(root_signer, another_signer.address, to_transfer, gas=20000)
async def test_transfer_failed(test_provider, session, root_signer, another_signer):
# TODO: it would be nice to reproduce the actual situation where this could happen
    # (transfer was accepted for mining, but failed in the process,
# and the resulting receipt has a 0 status).
orig_get_transaction_receipt = test_provider.eth_get_transaction_receipt
def mock_get_transaction_receipt(tx_hash_hex):
receipt = orig_get_transaction_receipt(tx_hash_hex)
receipt["status"] = "0x0"
return receipt
with monkeypatched(test_provider, "eth_get_transaction_receipt", mock_get_transaction_receipt):
with pytest.raises(TransactionFailed, match="Transfer failed"):
await session.transfer(root_signer, another_signer.address, Amount.ether(10))
async def test_deploy(test_provider, session, compiled_contracts, root_signer):
basic_contract = compiled_contracts["BasicContract"]
construction_error = compiled_contracts["TestErrors"]
payable_constructor = compiled_contracts["PayableConstructor"]
# Normal deploy
deployed_contract = await session.deploy(root_signer, basic_contract.constructor(123))
result = await session.eth_call(deployed_contract.read.getState(456))
assert result == (123 + 456,)
with pytest.raises(ValueError, match="This constructor does not accept an associated payment"):
await session.deploy(root_signer, basic_contract.constructor(1), Amount.ether(1))
# Explicit payment equal to zero is the same as no payment
await session.deploy(root_signer, basic_contract.constructor(1), Amount.ether(0))
# Payable constructor
contract = await session.deploy(
root_signer, payable_constructor.constructor(1), Amount.ether(1)
)
balance = await session.eth_get_balance(contract.address)
assert balance == Amount.ether(1)
# When gas is set manually, the gas estimation step is skipped,
# and we don't see the actual error, only the failed transaction.
with pytest.raises(TransactionFailed, match="Deploy failed"):
await session.deploy(root_signer, construction_error.constructor(0), gas=300000)
# Test the provider returning an empty `contractAddress`
orig_get_transaction_receipt = test_provider.eth_get_transaction_receipt
def mock_get_transaction_receipt(tx_hash_hex):
receipt = orig_get_transaction_receipt(tx_hash_hex)
receipt["contractAddress"] = None
return receipt
with monkeypatched(test_provider, "eth_get_transaction_receipt", mock_get_transaction_receipt):
with pytest.raises(
BadResponseFormat,
match=(
"The deploy transaction succeeded, "
"but `contractAddress` is not present in the receipt"
),
):
await session.deploy(root_signer, basic_contract.constructor(0))
async def test_transact(test_provider, session, compiled_contracts, root_signer):
basic_contract = compiled_contracts["BasicContract"]
# Normal transact
deployed_contract = await session.deploy(root_signer, basic_contract.constructor(123))
await session.transact(root_signer, deployed_contract.write.setState(456))
result = await session.eth_call(deployed_contract.read.getState(789))
assert result == (456 + 789,)
with pytest.raises(ValueError, match="This method does not accept an associated payment"):
await session.transact(root_signer, deployed_contract.write.setState(456), Amount.ether(1))
# Explicit payment equal to zero is the same as no payment
await session.transact(root_signer, deployed_contract.write.setState(456), Amount.ether(0))
# Payable transact
await session.transact(
root_signer, deployed_contract.write.payableSetState(456), Amount.ether(1)
)
balance = await session.eth_get_balance(deployed_contract.address)
assert balance == Amount.ether(1)
# Not enough gas
with pytest.raises(TransactionFailed, match="Transact failed"):
await session.transact(root_signer, deployed_contract.write.faultySetState(0), gas=300000)
async def test_get_block(test_provider, session, root_signer, another_signer):
to_transfer = Amount.ether(10)
await session.transfer(root_signer, another_signer.address, to_transfer)
block_info = await session.eth_get_block_by_number(1, with_transactions=True)
assert block_info.transactions is not None
block_info2 = await session.eth_get_block_by_hash(block_info.hash, with_transactions=True)
assert block_info2 == block_info
# no transactions
block_info = await session.eth_get_block_by_number(1)
assert block_info.transactions is None
# non-existent block
block_info = await session.eth_get_block_by_number(100, with_transactions=True)
assert block_info is None
block_info = await session.eth_get_block_by_hash(
BlockHash(b"\x00" * 32), with_transactions=True
)
assert block_info is None
async def test_eth_get_transaction_by_hash(test_provider, session, root_signer, another_signer):
to_transfer = Amount.ether(1)
tx_hash = await session.broadcast_transfer(root_signer, another_signer.address, to_transfer)
tx_info = await session.eth_get_transaction_by_hash(tx_hash)
assert tx_info.value == to_transfer
non_existent = TxHash(b"abcd" * 8)
tx_info = await session.eth_get_transaction_by_hash(non_existent)
assert tx_info is None
async def test_block_filter(test_provider, session, root_signer, another_signer):
to_transfer = Amount.ether(1)
await session.transfer(root_signer, another_signer.address, to_transfer)
block_filter = await session.eth_new_block_filter()
await session.transfer(root_signer, another_signer.address, to_transfer)
await session.transfer(root_signer, another_signer.address, to_transfer)
last_block = await session.eth_get_block_by_number(Block.LATEST)
prev_block = await session.eth_get_block_by_number(last_block.number - 1)
block_hashes = await session.eth_get_filter_changes(block_filter)
assert block_hashes == (prev_block.hash, last_block.hash)
await session.transfer(root_signer, another_signer.address, to_transfer)
block_hashes = await session.eth_get_filter_changes(block_filter)
last_block = await session.eth_get_block_by_number(Block.LATEST)
assert block_hashes == (last_block.hash,)
block_hashes = await session.eth_get_filter_changes(block_filter)
assert len(block_hashes) == 0
async def test_pending_transaction_filter(test_provider, session, root_signer, another_signer):
transaction_filter = await session.eth_new_pending_transaction_filter()
to_transfer = Amount.ether(1)
test_provider.disable_auto_mine_transactions()
tx_hash = await session.broadcast_transfer(root_signer, another_signer.address, to_transfer)
tx_hashes = await session.eth_get_filter_changes(transaction_filter)
assert tx_hashes == (tx_hash,)
async def test_log_filter_all(session, compiled_contracts, root_signer, another_signer):
face
v_uvz[..., 0] = (v_uvz[..., 0] * 0.5 + 0.5) * depth.shape[2] # [1, num_vertex]
v_uvz[..., 1] = (1 - (v_uvz[..., 1] * 0.5 + 0.5)) * depth.shape[1] # [1, num_vertex]
v_depth = misc.interpolate_bilinear(depth[0, :, :, None], v_uvz[..., 0], v_uvz[..., 1]) # [1, num_vertex, 1]
v_front_mask = ((v_uvz[0, :, 2] - v_depth[0, :, 0]) < self.mesh_span * 5e-3)[None, :] # [1, num_vertex]
# perspective correct weight
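        # Perspective-correct interpolation of the rasterizer's barycentric weights:
        # with screen-space weights w_i and per-vertex depths z_i, the corrected
        # weights are w_i' = (w_i / z_i) * z_interp, which sum to 1 when z_interp is
        # the perspective-correct depth at the pixel (assumed here to be what the
        # rasterized depth map stores).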
faces_v_z_inv_map = torch.cuda.FloatTensor(batch_size, image_size, image_size, 3).fill_(0.0)
for i in range(batch_size):
faces_v_z_inv_map[i, ...] = 1 / faces_v_uvz[i, face_index_map[i, ...].long()][..., -1]
weight_map = (faces_v_z_inv_map * weight_map) * depth.unsqueeze_(-1) # [batch_size, image_size, image_size, 3]
weight_map = weight_map.unsqueeze_(-1) # [batch_size, image_size, image_size, 3, 1]
# uv map
if self.renderer.fill_back:
faces_vt_idx = torch.cat((self.faces_vt_idx, self.faces_vt_idx[:, :, list(reversed(range(self.faces_vt_idx.shape[-1])))]), dim=1).detach()
else:
faces_vt_idx = self.faces_vt_idx.detach()
faces_vt = nr.vertex_attrs_to_faces(self.vertices_texcoords, faces_vt_idx) # [1, num_face, 3, 2]
uv_map = faces_vt[:, face_index_map.long()].squeeze_(0) # [batch_size, image_size, image_size, 3, 2], before weighted combination
uv_map = (uv_map * weight_map).sum(-2) # [batch_size, image_size, image_size, 2], after weighted combination
uv_map = uv_map - uv_map.floor() # handle uv_map wrapping, keep uv in [0, 1]
# normal map in world space
if self.renderer.fill_back:
faces_vn_idx = torch.cat((self.faces_vn_idx, self.faces_vn_idx[:, :, list(reversed(range(self.faces_vn_idx.shape[-1])))]), dim=1).detach()
else:
faces_vn_idx = self.faces_vn_idx.detach()
faces_vn = nr.vertex_attrs_to_faces(self.vertices_normals, faces_vn_idx) # [1, num_face, 3, 3]
normal_map = faces_vn[:, face_index_map.long()].squeeze_(0) # [batch_size, image_size, image_size, 3, 3], before weighted combination
normal_map = (normal_map * weight_map).sum(-2) # [batch_size, image_size, image_size, 3], after weighted combination
normal_map = torch.nn.functional.normalize(normal_map, dim = -1)
# normal_map in camera space
normal_map_flat = normal_map.flatten(start_dim = 1, end_dim = 2).permute((0, 2, 1))
normal_map_cam = pose[:, :3, :3].matmul(normal_map_flat).permute((0, 2, 1)).reshape(normal_map.shape)
normal_map_cam = torch.nn.functional.normalize(normal_map_cam, dim = -1)
# position_map in world space
faces_v = nr.vertex_attrs_to_faces(self.vertices, faces_v_idx) # [1, num_face, 3, 3]
position_map = faces_v[0, face_index_map.long()] # [batch_size, image_size, image_size, 3, 3], before weighted combination
position_map = (position_map * weight_map).sum(-2) # [batch_size, image_size, image_size, 3], after weighted combination
# position_map in camera space
position_map_flat = position_map.flatten(start_dim = 1, end_dim = 2).permute((0, 2, 1))
position_map_cam = pose[:, :3, :3].matmul(position_map_flat).permute((0, 2, 1)).reshape(position_map.shape) + pose[:, :3, -1][:, None, None, :]
return uv_map, alpha, face_index_map, weight_map, faces_v_idx, normal_map, normal_map_cam, faces_v, faces_vt, position_map, position_map_cam, depth, v_uvz, v_front_mask
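# --- Hedged illustration (not part of the original pipeline above) ---
# The "perspective correct weight" step rescales the rasterizer's screen-space
# barycentric weights by the per-vertex 1/z and the per-pixel depth, i.e.
# w'_i = (w_i / z_i) / sum_j (w_j / z_j), assuming `depth` stores the
# perspective-correct depth 1 / sum_j (w_j / z_j). A minimal stand-alone sketch
# of that formula for a single pixel (names and numbers are made up):
def _perspective_correct_weight_demo():
    import torch
    w = torch.tensor([1 / 3, 1 / 3, 1 / 3])   # screen-space barycentric weights
    z = torch.tensor([1.0, 2.0, 4.0])         # vertex depths of the covering face
    w_over_z = w / z
    w_corrected = w_over_z / w_over_z.sum()   # closer vertices get larger weights
    return w_corrected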
class RenderingModule(nn.Module):
def __init__(self,
nf0,
in_channels,
out_channels,
num_down_unet = 5,
out_channels_gcn = 512,
use_gcn = True,
outermost_highway_mode = 'concat'):
super().__init__()
self.register_buffer('nf0', torch.tensor(nf0))
self.register_buffer('in_channels', torch.tensor(in_channels))
self.register_buffer('out_channels', torch.tensor(out_channels))
self.register_buffer('num_down_unet', torch.tensor(num_down_unet))
self.register_buffer('out_channels_gcn', torch.tensor(out_channels_gcn))
self.net = Unet(in_channels = in_channels,
out_channels = out_channels,
outermost_linear = True,
use_dropout = False,
# use_dropout = True,
dropout_prob = 0.1,
nf0 = nf0,
norm = nn.InstanceNorm2d,
# norm = nn.BatchNorm2d,# chenxin 200803 temporary change for debug
max_channels = 8 * nf0,
num_down = num_down_unet,
out_channels_gcn = out_channels_gcn,
use_gcn = use_gcn,
outermost_highway_mode = outermost_highway_mode)
# self.tanh = nn.Tanh()
self.sigmoid = nn.Sigmoid()
def forward(self, input, v_fea = None):
x = self.net(input, v_fea)
# return self.tanh(x)
return self.sigmoid(x).clone()
class FeatureModule(nn.Module):
def __init__(self,
nf0,
in_channels,
out_channels,
num_down_unet = 5,
out_channels_gcn = 512,
use_gcn = True,
outermost_highway_mode = 'concat',
backbone = 'Unet'):
super().__init__()
self.register_buffer('nf0', torch.tensor(nf0))
self.register_buffer('in_channels', torch.tensor(in_channels))
self.register_buffer('out_channels', torch.tensor(out_channels))
self.register_buffer('num_down_unet', torch.tensor(num_down_unet))
self.register_buffer('out_channels_gcn', torch.tensor(out_channels_gcn))
self.net = eval(backbone)(in_channels = in_channels,
out_channels = out_channels,
outermost_linear = True,
use_dropout = False,
dropout_prob = 0.1,
nf0 = nf0,
norm = nn.InstanceNorm2d,
max_channels = 8 * nf0,
num_down = num_down_unet,
out_channels_gcn = out_channels_gcn,
use_gcn = use_gcn,
outermost_highway_mode = outermost_highway_mode)
# self.tanh = nn.Tanh()
self.sigmoid = nn.Sigmoid()
def forward(self, orig_texs, neural_tex = None, v_fea = None):
'''
orig_tex: [N, H, W, 3]
neural_tex: [1, H, W, C]
return: [N, C, H, W]
'''
# cat neural tex for each batch
if neural_tex is not None:
repeat_size = (int(orig_texs.shape[0]/neural_tex.shape[0]),1,1,1)
neural_texs = neural_tex.repeat(repeat_size)
cat_texs = torch.cat((orig_texs, neural_texs), 3).permute(0,3,1,2)
else:
cat_texs = orig_texs
# unet
x = self.net(cat_texs, v_fea)
# # average each batch
# x_mean = torch.mean(x, dim=0, keepdim=True)
# # return self.tanh(x_mean)
return self.sigmoid(x)
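# --- Hedged shape sketch (sizes below are assumptions, not from any config) ---
# FeatureModule.forward tiles the shared neural texture over the batch,
# concatenates it with the per-view RGB texture along the last axis and permutes
# to NCHW before the backbone. The same reshaping in isolation:
def _feature_module_shape_demo():
    import torch
    orig_texs = torch.rand(4, 128, 128, 3)    # [N, H, W, 3], four views
    neural_tex = torch.rand(1, 128, 128, 16)  # [1, H, W, C], shared texture
    neural_texs = neural_tex.repeat(orig_texs.shape[0] // neural_tex.shape[0], 1, 1, 1)
    cat_texs = torch.cat((orig_texs, neural_texs), 3).permute(0, 3, 1, 2)
    assert cat_texs.shape == (4, 19, 128, 128)  # [N, 3 + C, H, W]
    return cat_texs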
class AttentionFeatureModule(FeatureModule):
def __init__(self,
nf0,
in_channels,
out_channels,
num_down_unet = 5,
out_channels_gcn = 512,
use_gcn = True,
outermost_highway_mode = 'concat'):
backbone = 'AttentionUnet'
super().__init__(nf0,
in_channels,
out_channels,
num_down_unet,
out_channels_gcn,
use_gcn,
outermost_highway_mode,
backbone)
self.sigmoid = nn.Sigmoid()
def forward(self, orig_texs, neural_tex = None, v_fea = None):
'''
orig_tex: [N, H, W, 3]
neural_tex: [1, H, W, C]
return: [N, C, H, W]
'''
# cat neural tex for each batch
if neural_tex is not None:
repeat_size = (int(orig_texs.shape[0]/neural_tex.shape[0]),1,1,1)
neural_texs = neural_tex.repeat(repeat_size)
cat_texs = torch.cat((orig_texs, neural_texs), 3).permute(0,3,1,2)
else:
cat_texs = orig_texs
# unet
x = self.net(cat_texs, v_fea)
feature_ch = x[:,:-1,:,:]
attention_ch = x[:,-1,:,:][:,None,:,:]
attention_ch = self.sigmoid(attention_ch)
return feature_ch, attention_ch
class DenseDeepGCN(torch.nn.Module):
def __init__(self, opt):
super(DenseDeepGCN, self).__init__()
channels = opt.n_filters
k = opt.kernel_size
act = opt.act_type
norm = opt.norm_type
bias = opt.bias
epsilon = opt.epsilon
stochastic = opt.stochastic
conv = opt.conv_type
c_growth = channels
self.n_blocks = opt.n_blocks
num_v = opt.num_v_gcn
out_channels = opt.out_channels_gcn
self.knn = DenseDilatedKnnGraph(k, 1, stochastic, epsilon)
self.head = GraphConv4D(opt.in_channels, channels, conv, act, norm, bias)
if opt.block_type.lower() == 'res':
self.backbone = Seq(*[ResDynBlock4D(channels, k, 1+i, conv, act, norm, bias, stochastic, epsilon)
for i in range(self.n_blocks-1)])
elif opt.block_type.lower() == 'dense':
self.backbone = Seq(*[DenseDynBlock4D(channels+c_growth*i, c_growth, k, 1+i, conv, act,
norm, bias, stochastic, epsilon)
for i in range(self.n_blocks-1)])
else:
raise NotImplementedError('{} is not implemented. Please check.\n'.format(opt.block_type))
self.fusion_block = BasicConv([channels+c_growth*(self.n_blocks-1), 1024], act, None, bias)
self.prediction = Seq(*[BasicConv([1+channels+c_growth*(self.n_blocks-1), 512, 256], act, None, bias),
BasicConv([256, 64], act, None, bias)])
self.linear = Seq(*[utils.spectral_norm(nn.Linear(num_v,2048)), utils.spectral_norm(nn.Linear(2048, out_channels))])
self.model_init()
def model_init(self):
for m in self.modules():
if isinstance(m, torch.nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight)
m.weight.requires_grad = True
if m.bias is not None:
m.bias.data.zero_()
m.bias.requires_grad = True
elif isinstance(m,torch.nn.BatchNorm2d):
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def forward(self, inputs):
data = torch.cat((inputs.pos,inputs.x),1).unsqueeze(0).unsqueeze(-1)
feats = [self.head(data.transpose(2,1), self.knn(data[:,:, 0:3]))]
for i in range(self.n_blocks-1):
feats.append(self.backbone[i](feats[-1]))
feats = torch.cat(feats, 1)
fusion, _ = torch.max(self.fusion_block(feats), 1, keepdim=True)
fea = self.linear(fusion.view(-1)).unsqueeze(0)
return fea
class Interpolater(nn.Module):
def __init__(self):
super().__init__()
def forward(self, data, sub_x, sub_y):
'''
data: [N, H, W, C] or [1, H, W, C]
sub_x: [N, ...]
sub_y: [N, ...]
return: [N, ..., C]
'''
if data.shape[0] == 1:
return misc.interpolate_bilinear(data[0, :], sub_x, sub_y) # [N, ..., C]
elif data.shape[0] == sub_x.shape[0]:
out = []
for i in range(data.shape[0]):
out.append(misc.interpolate_bilinear(data[i, :], sub_x[i, :], sub_y[i, :])) # [..., C]
return torch.stack(out) # [N, ..., C]
else:
raise ValueError('data.shape[0] should be 1 or batch size')
class InterpolaterVertexAttr(nn.Module):
def __init__(self):
super().__init__()
def forward(self, v_attr, faces_v_idx, face_index_map, weight_map):
'''
v_attr: [N, num_vertex, num_attr] or [1, num_vertex, num_attr]
faces_v_idx: [N, num_face, 3]
face_index_map: [N, H, W]
weight_map: [N, H, W, 3, 1]
return: [N, H, W, num_attr]
'''
return render.interp_vertex_attr(v_attr, faces_v_idx, face_index_map, weight_map)
class Mesh(nn.Module):
def __init__(self, obj_fp, global_RT = None):
super().__init__()
# load obj
v_attr, f_attr = nr.load_obj(obj_fp, normalization = False)
v = v_attr['v'].cpu() # [num_vertex, 3]
vn = v_attr['vn'].cpu() # [num_vertex, 3]
self.num_vertex = v.shape[0]
# compute useful information
self.v_orig = v.clone()
self.vn_orig = vn.clone()
self.span_orig = v.max(dim = 0)[0] - v.min(dim = 0)[0]
self.span_max_orig = self.span_orig.max()
self.center_orig = v.mean(dim = 0)
# apply global_RT
if global_RT is not None:
v = torch.matmul(global_RT.to(v.device), torch.cat((v, torch.ones(self.num_vertex, 1).to(v.device)), dim = 1).transpose(1, 0)).transpose(1, 0)[:, :3]
vn = torch.nn.functional.normalize(torch.matmul(global_RT[:3, :3].to(vn.device), vn.transpose(1, 0)).transpose(1, 0), dim = 1)
self.register_buffer('v', v)
self.register_buffer('vn', vn)
print('v shape:', self.v.shape)
print('vn shape:', self.vn.shape)
# compute useful information
self.span = v.max(dim = 0)[0] - v.min(dim = 0)[0]
self.span_max = self.span.max()
self.center = v.mean(dim = 0)
def forward(self):
pass
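# --- Hedged sketch of the global_RT transform applied in Mesh.__init__ ---
# Vertices are transformed in homogeneous coordinates while normals only see the
# rotation part and are re-normalised. A tiny stand-alone version (the 4x4
# matrix below is an assumed pure translation, not taken from any scene file):
def _global_rt_demo():
    import torch
    global_RT = torch.eye(4)
    global_RT[:3, 3] = torch.tensor([1.0, 0.0, 0.0])
    v = torch.zeros(5, 3)
    v_h = torch.cat((v, torch.ones(5, 1)), dim=1)                              # [num_vertex, 4]
    v_t = torch.matmul(global_RT, v_h.transpose(1, 0)).transpose(1, 0)[:, :3]  # translated vertices
    vn = torch.nn.functional.normalize(torch.rand(5, 3), dim=1)
    vn_t = torch.nn.functional.normalize(
        torch.matmul(global_RT[:3, :3], vn.transpose(1, 0)).transpose(1, 0), dim=1)  # rotation only
    return v_t, vn_t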
class RaysLTChromLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, rays_lt, alpha_map, img = None):
'''
rays_lt: [N, num_ray, C, H, W]
alpha_map: [N, 1, H, W]
img: [N, C, H, W]
return: [1]
'''
rays_lt_chrom = torch.nn.functional.normalize(rays_lt, dim = 2) # [N, num_ray, C, H, W]
rays_lt_chrom_mean = rays_lt_chrom.mean(dim = 1)[:, None, :, :, :] # [N, 1, C, H, W]
rays_lt_chrom_mean = torch.nn.functional.normalize(rays_lt_chrom_mean, dim = 2) # [N, 1, C, H, W]
rays_lt_chrom_diff = (1 - (rays_lt_chrom * rays_lt_chrom_mean).sum(2)) * alpha_map # [N, num_ray, H, W]
if img is not None:
# weight by image intensity
weight = (img.norm(dim = 1, keepdim = True) * 20).clamp(max = 1.0)
rays_lt_chrom_diff = rays_lt_chrom_diff * weight # [N, num_ray, H, W]
loss_rays_lt_chrom = rays_lt_chrom_diff.sum() / alpha_map.sum() / rays_lt_chrom_diff.shape[1]
return loss_rays_lt_chrom, rays_lt_chrom, rays_lt_chrom_mean, rays_lt_chrom_diff
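# --- Hedged numeric sketch of the chromaticity term above (illustrative only) ---
# For a single pixel the loss penalises 1 - cos(angle) between each ray's
# normalised colour and the re-normalised mean ray colour:
def _rays_chrom_demo():
    import torch
    rays = torch.tensor([[1.0, 0.0, 0.0],     # ray 1 colour
                         [0.0, 1.0, 0.0]])    # ray 2 colour
    chrom = torch.nn.functional.normalize(rays, dim=-1)
    chrom_mean = torch.nn.functional.normalize(chrom.mean(dim=0, keepdim=True), dim=-1)
    diff = 1 - (chrom * chrom_mean).sum(-1)   # ~0 when all rays share the same hue
    return diff                               # here: tensor([0.2929, 0.2929])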
####################################################################################################################################
##################################################
"""Core implementation of import.
This module is NOT meant to be directly imported! It has been designed such
that it can be bootstrapped into Python as the implementation of import. As
such it requires the injection of specific modules and attributes in order to
work. One should use importlib as the public-facing version of this module.
"""
# Injected modules are '_warnings', 'imp', 'sys', 'marshal', 'errno', '_io',
# and '_os' (a.k.a. 'posix', 'nt' or 'os2').
# Injected attribute is path_sep.
#
# When editing this code be aware that code executed at import time CANNOT
# reference any injected objects! This includes not only global code but also
# anything specified at the class level.
# Bootstrap-related code ######################################################
# XXX Could also expose Modules/getpath.c:joinpath()
def _path_join(*args):
"""Replacement for os.path.join."""
return path_sep.join(x[:-len(path_sep)] if x.endswith(path_sep) else x
for x in args)
def _path_exists(path):
"""Replacement for os.path.exists."""
try:
_os.stat(path)
except OSError:
return False
else:
return True
def _path_is_mode_type(path, mode):
"""Test whether the path is the specified mode type."""
try:
stat_info = _os.stat(path)
except OSError:
return False
return (stat_info.st_mode & 0o170000) == mode
# XXX Could also expose Modules/getpath.c:isfile()
def _path_isfile(path):
"""Replacement for os.path.isfile."""
return _path_is_mode_type(path, 0o100000)
# XXX Could also expose Modules/getpath.c:isdir()
def _path_isdir(path):
"""Replacement for os.path.isdir."""
return _path_is_mode_type(path, 0o040000)
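# Hedged note (not part of the bootstrap): the octal constants above are the
# st_mode file-type bits -- 0o170000 is the mask applied by stat.S_IFMT(),
# 0o100000 is stat.S_IFREG (regular file) and 0o040000 is stat.S_IFDIR
# (directory) -- so outside of the bootstrap the same checks could be written as
#   stat.S_IFMT(os.stat(path).st_mode) == stat.S_IFREG   # _path_isfile
#   stat.S_IFMT(os.stat(path).st_mode) == stat.S_IFDIR   # _path_isdir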
def _path_without_ext(path, ext_type):
"""Replacement for os.path.splitext()[0]."""
for suffix in _suffix_list(ext_type):
if path.endswith(suffix):
return path[:-len(suffix)]
else:
raise ValueError("path is not of the specified type")
def _path_absolute(path):
"""Replacement for os.path.abspath."""
if not path:
path = _os.getcwd()
try:
return _os._getfullpathname(path)
except AttributeError:
if path.startswith('/'):
return path
else:
return _path_join(_os.getcwd(), path)
class _closing:
"""Simple replacement for contextlib.closing."""
def __init__(self, obj):
self.obj = obj
def __enter__(self):
return self.obj
def __exit__(self, *args):
self.obj.close()
def _wrap(new, old):
"""Simple substitute for functools.wraps."""
for replace in ['__module__', '__name__', '__doc__']:
setattr(new, replace, getattr(old, replace))
new.__dict__.update(old.__dict__)
# Finder/loader utility code ##################################################
def set_package(fxn):
"""Set __package__ on the returned module."""
def wrapper(*args, **kwargs):
module = fxn(*args, **kwargs)
if not hasattr(module, '__package__') or module.__package__ is None:
module.__package__ = module.__name__
if not hasattr(module, '__path__'):
module.__package__ = module.__package__.rpartition('.')[0]
return module
_wrap(wrapper, fxn)
return wrapper
def set_loader(fxn):
"""Set __loader__ on the returned module."""
def wrapper(self, *args, **kwargs):
module = fxn(self, *args, **kwargs)
if not hasattr(module, '__loader__'):
module.__loader__ = self
return module
_wrap(wrapper, fxn)
return wrapper
def module_for_loader(fxn):
"""Decorator to handle selecting the proper module for loaders.
The decorated function is passed the module to use instead of the module
name. The module passed in to the function is either from sys.modules if
it already exists or is a new module which has __name__ set and is inserted
into sys.modules. If an exception is raised and the decorator created the
module it is subsequently removed from sys.modules.
The decorator assumes that the decorated function takes the module name as
the second argument.
"""
def decorated(self, fullname):
module = sys.modules.get(fullname)
is_reload = bool(module)
if not is_reload:
# This must be done before open() is called as the 'io' module
# implicitly imports 'locale' and would otherwise trigger an
# infinite loop.
module = imp.new_module(fullname)
sys.modules[fullname] = module
try:
return fxn(self, module)
except:
if not is_reload:
del sys.modules[fullname]
raise
_wrap(decorated, fxn)
return decorated
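# Hedged usage sketch (hypothetical loader, not part of this module): a loader
# decorated with @module_for_loader receives the module object -- fresh, or
# pulled from sys.modules on reload -- instead of the dotted name:
#
#   class _ExampleLoader:
#       @module_for_loader
#       def load_module(self, module):   # second argument is the module itself
#           module.answer = 42           # populate the module namespace
#           return module
#
# If the decorated function raises and the module was newly created, the
# decorator removes it from sys.modules again.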
def _check_name(method):
"""Decorator to verify that the module being requested matches the one the
loader can handle.
The first argument (self) must define _name which the second argument is
compared against. If the comparison fails then ImportError is raised.
"""
def inner(self, name, *args, **kwargs):
if self._name != name:
raise ImportError("loader cannot handle %s" % name)
return method(self, name, *args, **kwargs)
_wrap(inner, method)
return inner
def _requires_builtin(fxn):
"""Decorator to verify the named module is built-in."""
def wrapper(self, fullname):
if fullname not in sys.builtin_module_names:
raise ImportError("{0} is not a built-in module".format(fullname))
return fxn(self, fullname)
_wrap(wrapper, fxn)
return wrapper
def _requires_frozen(fxn):
"""Decorator to verify the named module is frozen."""
def wrapper(self, fullname):
if not imp.is_frozen(fullname):
raise ImportError("{0} is not a frozen module".format(fullname))
return fxn(self, fullname)
_wrap(wrapper, fxn)
return wrapper
def _suffix_list(suffix_type):
"""Return a list of file suffixes based on the imp file type."""
return [suffix[0] for suffix in imp.get_suffixes()
if suffix[2] == suffix_type]
# Loaders #####################################################################
class BuiltinImporter:
"""Meta path import for built-in modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@classmethod
def find_module(cls, fullname, path=None):
"""Find the built-in module.
If 'path' is ever specified then the search is considered a failure.
"""
if path is not None:
return None
return cls if imp.is_builtin(fullname) else None
@classmethod
@set_package
@set_loader
@_requires_builtin
def load_module(cls, fullname):
"""Load a built-in module."""
is_reload = fullname in sys.modules
try:
return imp.init_builtin(fullname)
except:
if not is_reload and fullname in sys.modules:
del sys.modules[fullname]
raise
@classmethod
@_requires_builtin
def get_code(cls, fullname):
"""Return None as built-in modules do not have code objects."""
return None
@classmethod
@_requires_builtin
def get_source(cls, fullname):
"""Return None as built-in modules do not have source code."""
return None
@classmethod
@_requires_builtin
def is_package(cls, fullname):
"""Return None as built-in module are never packages."""
return False
class FrozenImporter:
"""Meta path import for frozen modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@classmethod
def find_module(cls, fullname, path=None):
"""Find a frozen module."""
return cls if imp.is_frozen(fullname) else None
@classmethod
@set_package
@set_loader
@_requires_frozen
def load_module(cls, fullname):
"""Load a frozen module."""
is_reload = fullname in sys.modules
try:
return imp.init_frozen(fullname)
except:
if not is_reload and fullname in sys.modules:
del sys.modules[fullname]
raise
@classmethod
@_requires_frozen
def get_code(cls, fullname):
"""Return the code object for the frozen module."""
return imp.get_frozen_object(fullname)
@classmethod
@_requires_frozen
def get_source(cls, fullname):
"""Return None as frozen modules do not have source code."""
return None
@classmethod
@_requires_frozen
def is_package(cls, fullname):
"""Return if the frozen module is a package."""
return imp.is_frozen_package(fullname)
class PyLoader:
"""Loader base class for Python source code.
Subclasses need to implement the methods:
- source_path
- get_data
- is_package
"""
@module_for_loader
def load_module(self, module):
"""Load a source module."""
return self._load_module(module)
def _load_module(self, module):
"""Initialize a module from source."""
name = module.__name__
code_object = self.get_code(module.__name__)
# __file__ may have been set by the caller, e.g. bytecode path.
if not hasattr(module, '__file__'):
module.__file__ = self.source_path(name)
if self.is_package(name):
module.__path__ = [module.__file__.rsplit(path_sep, 1)[0]]
module.__package__ = module.__name__
if not hasattr(module, '__path__'):
module.__package__ = module.__package__.rpartition('.')[0]
module.__loader__ = self
exec(code_object, module.__dict__)
return module
def get_code(self, fullname):
"""Get a code object from source."""
source_path = self.source_path(fullname)
if source_path is None:
message = "a source path must exist to load {0}".format(fullname)
raise ImportError(message)
source = self.get_data(source_path)
# Convert to universal newlines.
line_endings = b'\n'
for index, c in enumerate(source):
if c == ord(b'\n'):
break
elif c == ord(b'\r'):
line_endings = b'\r'
try:
if source[index+1] == ord(b'\n'):
line_endings += b'\n'
except IndexError:
pass
break
if line_endings != b'\n':
source = source.replace(line_endings, b'\n')
return compile(source, source_path, 'exec', dont_inherit=True)
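# Hedged illustration of the newline handling above (made-up source bytes):
# only the first line break is inspected, then every occurrence of that style
# is rewritten to b'\n' before compile(), e.g.
#   b'x = 1\r\ny = 2\r\n'  ->  b'x = 1\ny = 2\n'
#   b'x = 1\ry = 2\r'      ->  b'x = 1\ny = 2\n'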
# Never use in implementing import! Imports code within the method.
def get_source(self, fullname):
"""Return the source code for a module.
self.source_path() and self.get_data() are used to implement this
method.
"""
path = self.source_path(fullname)
if path is None:
return None
try:
source_bytes = self.get_data(path)
except IOError:
raise ImportError("source not available through get_data()")
import io
import tokenize
encoding = tokenize.detect_encoding(io.BytesIO(source_bytes).readline)
return source_bytes.decode(encoding[0])
class PyPycLoader(PyLoader):
"""Loader base class for Python source and bytecode.
Requires implementing the methods needed for PyLoader as well as
source_mtime, bytecode_path, and write_bytecode.
"""
@module_for_loader
def load_module(self, module):
"""Load a module from source or bytecode."""
name = module.__name__
source_path = self.source_path(name)
bytecode_path = self.bytecode_path(name)
# get_code can worry about no viable paths existing.
module.__file__ = source_path or bytecode_path
return self._load_module(module)
def get_code(self, fullname):
"""Get a code object from source or bytecode."""
# XXX Care enough to make sure this call does not happen if the magic
# number is bad?
source_timestamp = self.source_mtime(fullname)
# Try to use bytecode if it is available.
bytecode_path = self.bytecode_path(fullname)
if bytecode_path:
data = self.get_data(bytecode_path)
try:
magic = data[:4]
if len(magic) < 4:
raise ImportError("bad magic number in {}".format(fullname))
raw_timestamp = data[4:8]
if len(raw_timestamp) < 4:
raise EOFError("bad timestamp in {}".format(fullname))
pyc_timestamp = marshal._r_long(raw_timestamp)
bytecode = data[8:]
# Verify that the magic number is valid.
if imp.get_magic() != magic:
raise ImportError("bad magic number in {}".format(fullname))
# Verify that the bytecode