0)
m.c2526 = Constraint(expr= m.x2525 - m.b3017 <= 0)
m.c2527 = Constraint(expr= m.x2526 - m.b3017 <= 0)
m.c2528 = Constraint(expr= m.x2527 - m.b3017 <= 0)
m.c2529 = Constraint(expr= m.x2528 - m.b3017 <= 0)
m.c2530 = Constraint(expr= m.x2529 - m.b3017 <= 0)
m.c2531 = Constraint(expr= m.x2530 - m.b3017 <= 0)
m.c2532 = Constraint(expr= m.x2531 - m.b3017 <= 0)
m.c2533 = Constraint(expr= m.x2532 - m.b3017 <= 0)
m.c2534 = Constraint(expr= m.x2533 - m.b3017 <= 0)
m.c2535 = Constraint(expr= m.x2534 - m.b3017 <= 0)
m.c2536 = Constraint(expr= m.x2535 - m.b3017 <= 0)
m.c2537 = Constraint(expr= m.x2536 - m.b3017 <= 0)
m.c2538 = Constraint(expr= m.x2537 - m.b3017 <= 0)
m.c2539 = Constraint(expr= m.x2538 - m.b3017 <= 0)
m.c2540 = Constraint(expr= m.x2539 - m.b3017 <= 0)
m.c2541 = Constraint(expr= m.x2540 - m.b3017 <= 0)
m.c2542 = Constraint(expr= m.x2541 - m.b3017 <= 0)
m.c2543 = Constraint(expr= m.x2542 - m.b3017 <= 0)
m.c2544 = Constraint(expr= m.x2543 - m.b3017 <= 0)
m.c2545 = Constraint(expr= m.x2544 - m.b3017 <= 0)
m.c2546 = Constraint(expr= m.x2545 - m.b3017 <= 0)
m.c2547 = Constraint(expr= m.x2546 - m.b3017 <= 0)
m.c2548 = Constraint(expr= m.x2547 - m.b3017 <= 0)
m.c2549 = Constraint(expr= m.x2548 - m.b3017 <= 0)
m.c2550 = Constraint(expr= m.x2549 - m.b3017 <= 0)
m.c2551 = Constraint(expr= m.x2550 - m.b3017 <= 0)
m.c2552 = Constraint(expr= m.x2551 - m.b3018 <= 0)
m.c2553 = Constraint(expr= m.x2552 - m.b3018 <= 0)
m.c2554 = Constraint(expr= m.x2553 - m.b3018 <= 0)
m.c2555 = Constraint(expr= m.x2554 - m.b3018 <= 0)
m.c2556 = Constraint(expr= m.x2555 - m.b3018 <= 0)
m.c2557 = Constraint(expr= m.x2556 - m.b3018 <= 0)
m.c2558 = Constraint(expr= m.x2557 - m.b3018 <= 0)
m.c2559 = Constraint(expr= m.x2558 - m.b3018 <= 0)
m.c2560 = Constraint(expr= m.x2559 - m.b3018 <= 0)
m.c2561 = Constraint(expr= m.x2560 - m.b3018 <= 0)
m.c2562 = Constraint(expr= m.x2561 - m.b3018 <= 0)
m.c2563 = Constraint(expr= m.x2562 - m.b3018 <= 0)
m.c2564 = Constraint(expr= m.x2563 - m.b3018 <= 0)
m.c2565 = Constraint(expr= m.x2564 - m.b3018 <= 0)
m.c2566 = Constraint(expr= m.x2565 - m.b3018 <= 0)
m.c2567 = Constraint(expr= m.x2566 - m.b3018 <= 0)
m.c2568 = Constraint(expr= m.x2567 - m.b3018 <= 0)
m.c2569 = Constraint(expr= m.x2568 - m.b3018 <= 0)
m.c2570 = Constraint(expr= m.x2569 - m.b3018 <= 0)
m.c2571 = Constraint(expr= m.x2570 - m.b3018 <= 0)
m.c2572 = Constraint(expr= m.x2571 - m.b3018 <= 0)
m.c2573 = Constraint(expr= m.x2572 - m.b3018 <= 0)
m.c2574 = Constraint(expr= m.x2573 - m.b3018 <= 0)
m.c2575 = Constraint(expr= m.x2574 - m.b3018 <= 0)
m.c2576 = Constraint(expr= m.x2575 - m.b3018 <= 0)
m.c2577 = Constraint(expr= m.x2576 - m.b3018 <= 0)
m.c2578 = Constraint(expr= m.x2577 - m.b3018 <= 0)
m.c2579 = Constraint(expr= m.x2578 - m.b3018 <= 0)
m.c2580 = Constraint(expr= m.x2579 - m.b3018 <= 0)
m.c2581 = Constraint(expr= m.x2580 - m.b3018 <= 0)
m.c2582 = Constraint(expr= m.x2581 - m.b3018 <= 0)
m.c2583 = Constraint(expr= m.x2582 - m.b3018 <= 0)
m.c2584 = Constraint(expr= m.x2583 - m.b3018 <= 0)
m.c2585 = Constraint(expr= m.x2584 - m.b3018 <= 0)
m.c2586 = Constraint(expr= m.x2585 - m.b3018 <= 0)
m.c2587 = Constraint(expr= m.x2586 - m.b3018 <= 0)
m.c2588 = Constraint(expr= m.x2587 - m.b3018 <= 0)
m.c2589 = Constraint(expr= m.x2588 - m.b3018 <= 0)
m.c2590 = Constraint(expr= m.x2589 - m.b3018 <= 0)
m.c2591 = Constraint(expr= m.x2590 - m.b3018 <= 0)
m.c2592 = Constraint(expr= m.x2591 - m.b3018 <= 0)
m.c2593 = Constraint(expr= m.x2592 - m.b3018 <= 0)
m.c2594 = Constraint(expr= m.x2593 - m.b3018 <= 0)
m.c2595 = Constraint(expr= m.x2594 - m.b3018 <= 0)
m.c2596 = Constraint(expr= m.x2595 - m.b3018 <= 0)
m.c2597 = Constraint(expr= m.x2596 - m.b3018 <= 0)
m.c2598 = Constraint(expr= m.x2597 - m.b3018 <= 0)
m.c2599 = Constraint(expr= m.x2598 - m.b3018 <= 0)
m.c2600 = Constraint(expr= m.x2599 - m.b3018 <= 0)
m.c2601 = Constraint(expr= m.x2600 - m.b3018 <= 0)
m.c2602 = Constraint(expr= m.x2601 - m.b3018 <= 0)
m.c2603 = Constraint(expr= m.x2602 - m.b3018 <= 0)
m.c2604 = Constraint(expr= m.x2603 - m.b3018 <= 0)
m.c2605 = Constraint(expr= m.x2604 - m.b3018 <= 0)
m.c2606 = Constraint(expr= m.x2605 - m.b3018 <= 0)
m.c2607 = Constraint(expr= m.x2606 - m.b3018 <= 0)
m.c2608 = Constraint(expr= m.x2607 - m.b3018 <= 0)
m.c2609 = Constraint(expr= m.x2608 - m.b3018 <= 0)
m.c2610 = Constraint(expr= m.x2609 - m.b3018 <= 0)
m.c2611 = Constraint(expr= m.x2610 - m.b3018 <= 0)
m.c2612 = Constraint(expr= m.x2611 - m.b3018 <= 0)
m.c2613 = Constraint(expr= m.x2612 - m.b3018 <= 0)
m.c2614 = Constraint(expr= m.x2613 - m.b3018 <= 0)
m.c2615 = Constraint(expr= m.x2614 - m.b3018 <= 0)
m.c2616 = Constraint(expr= m.x2615 - m.b3018 <= 0)
m.c2617 = Constraint(expr= m.x2616 - m.b3018 <= 0)
m.c2618 = Constraint(expr= m.x2617 - m.b3018 <= 0)
m.c2619 = Constraint(expr= m.x2618 - m.b3018 <= 0)
m.c2620 = Constraint(expr= m.x2619 - m.b3018 <= 0)
m.c2621 = Constraint(expr= m.x2620 - m.b3018 <= 0)
m.c2622 = Constraint(expr= m.x2621 - m.b3018 <= 0)
m.c2623 = Constraint(expr= m.x2622 - m.b3018 <= 0)
m.c2624 = Constraint(expr= m.x2623 - m.b3018 <= 0)
m.c2625 = Constraint(expr= m.x2624 - m.b3018 <= 0)
m.c2626 = Constraint(expr= m.x2625 - m.b3018 <= 0)
m.c2627 = Constraint(expr= m.x2626 - m.b3018 <= 0)
m.c2628 = Constraint(expr= m.x2627 - m.b3018 <= 0)
m.c2629 = Constraint(expr= m.x2628 - m.b3018 <= 0)
m.c2630 = Constraint(expr= m.x2629 - m.b3018 <= 0)
m.c2631 = Constraint(expr= m.x2630 - m.b3018 <= 0)
m.c2632 = Constraint(expr= m.x2631 - m.b3018 <= 0)
m.c2633 = Constraint(expr= m.x2632 - m.b3018 <= 0)
m.c2634 = Constraint(expr= m.x2633 - m.b3018 <= 0)
m.c2635 = Constraint(expr= m.x2634 - m.b3018 <= 0)
m.c2636 = Constraint(expr= m.x2635 - m.b3018 <= 0)
m.c2637 = Constraint(expr= m.x2636 - m.b3018 <= 0)
m.c2638 = Constraint(expr= m.x2637 - m.b3018 <= 0)
m.c2639 = Constraint(expr= m.x2638 - m.b3018 <= 0)
m.c2640 = Constraint(expr= m.x2639 - m.b3018 <= 0)
m.c2641 = Constraint(expr= m.x2640 - m.b3018 <= 0)
m.c2642 = Constraint(expr= m.x2641 - m.b3018 <= 0)
m.c2643 = Constraint(expr= m.x2642 - m.b3018 <= 0)
m.c2644 = Constraint(expr= m.x2643 - m.b3018 <= 0)
m.c2645 = Constraint(expr= m.x2644 - m.b3018 <= 0)
m.c2646 = Constraint(expr= m.x2645 - m.b3018 <= 0)
m.c2647 = Constraint(expr= m.x2646 - m.b3018 <= 0)
m.c2648 = Constraint(expr= m.x2647 - m.b3018 <= 0)
m.c2649 = Constraint(expr= m.x2648 - m.b3018 <= 0)
m.c2650 = Constraint(expr= m.x2649 - m.b3018 <= 0)
m.c2651 = Constraint(expr= m.x2650 - m.b3018 <= 0)
m.c2652 = Constraint(expr= m.x2651 - m.b3018 <= 0)
m.c2653 = Constraint(expr= m.x2652 - m.b3018 <= 0)
m.c2654 = Constraint(expr= m.x2653 - m.b3018 <= 0)
m.c2655 = Constraint(expr= m.x2654 - m.b3018 <= 0)
m.c2656 = Constraint(expr= m.x2655 - m.b3018 <= 0)
m.c2657 = Constraint(expr= m.x2656 - m.b3018 <= 0)
m.c2658 = Constraint(expr= m.x2657 - m.b3018 <= 0)
m.c2659 = Constraint(expr= m.x2658 - m.b3018 <= 0)
m.c2660 = Constraint(expr= m.x2659 - m.b3018 <= 0)
m.c2661 = Constraint(expr= m.x2660 - m.b3018 <= 0)
m.c2662 = Constraint(expr= m.x2661 - m.b3018 <= 0)
m.c2663 = Constraint(expr= m.x2662 - m.b3018 <= 0)
m.c2664 = Constraint(expr= m.x2663 - m.b3018 <= 0)
m.c2665 = Constraint(expr= m.x2664 - m.b3018 <= 0)
m.c2666 = Constraint(expr= m.x2665 - m.b3018 <= 0)
m.c2667 = Constraint(expr= m.x2666 - m.b3018 <= 0)
m.c2668 = Constraint(expr= m.x2667 - m.b3018 <= 0)
m.c2669 = Constraint(expr= m.x2668 - m.b3018 <= 0)
m.c2670 = Constraint(expr= m.x2669 - m.b3018 <= 0)
m.c2671 = Constraint(expr= m.x2670 - m.b3018 <= 0)
m.c2672 = Constraint(expr= m.x2671 - m.b3018 <= 0)
m.c2673 = Constraint(expr= m.x2672 - m.b3018 <= 0)
m.c2674 = Constraint(expr= m.x2673 - m.b3018 <= 0)
m.c2675 = Constraint(expr= m.x2674 - m.b3018 <= 0)
m.c2676 = Constraint(expr= m.x2675 - m.b3018 <= 0)
m.c2677 = Constraint(expr= m.x2676 - m.b3018 <= 0)
m.c2678 = Constraint(expr= m.x2677 - m.b3018 <= 0)
m.c2679 = Constraint(expr= m.x2678 - m.b3018 <= 0)
m.c2680 = Constraint(expr= m.x2679 - m.b3018 <= 0)
m.c2681 = Constraint(expr= m.x2680 - m.b3018 <= 0)
m.c2682 = Constraint(expr= m.x2681 - m.b3018 <= 0)
m.c2683 = Constraint(expr= m.x2682 - m.b3018 <= 0)
m.c2684 = Constraint(expr= m.x2683 - m.b3018 <= 0)
m.c2685 = Constraint(expr= m.x2684 - m.b3018 <= 0)
m.c2686 = Constraint(expr= m.x2685 - m.b3018 <= 0)
m.c2687 = Constraint(expr= m.x2686 - m.b3018 <= 0)
m.c2688 = Constraint(expr= m.x2687 - m.b3018 <= 0)
m.c2689 = Constraint(expr= m.x2688 - m.b3018 <= 0)
m.c2690 = Constraint(expr= m.x2689 - m.b3018 <= 0)
m.c2691 = Constraint(expr= m.x2690 - m.b3018 <= 0)
m.c2692 = Constraint(expr= m.x2691 - m.b3018 <= 0)
m.c2693 = Constraint(expr= m.x2692 - m.b3018 <= 0)
m.c2694 = Constraint(expr= m.x2693 - m.b3018 <= 0)
m.c2695 = Constraint(expr= m.x2694 - m.b3018 <= 0)
m.c2696 = Constraint(expr= m.x2695 - m.b3018 <= 0)
m.c2697 = Constraint(expr= m.x2696 - m.b3018 <= 0)
m.c2698 = Constraint(expr= m.x2697 - m.b3018 <= 0)
m.c2699 = Constraint(expr= m.x2698 - m.b3018 <= 0)
m.c2700 = Constraint(expr= m.x2699 - m.b3018 <= 0)
m.c2701 = Constraint(expr= m.x2700 - m.b3018 <= 0)
m.c2702 = Constraint(expr= m.x2701 - m.b3019 <= 0)
m.c2703 = Constraint(expr= m.x2702 - m.b3019 <= 0)
m.c2704 = Constraint(expr= m.x2703 - m.b3019 <= 0)
m.c2705 = Constraint(expr= m.x2704 - m.b3019 <= 0)
m.c2706 = Constraint(expr= m.x2705 - m.b3019 <= 0)
m.c2707 = Constraint(expr= m.x2706 - m.b3019 <= 0)
m.c2708 = Constraint(expr= m.x2707 - m.b3019 <= 0)
<gh_stars>10-100
""" Contains various functions for mathematical/geological transforms. """
from math import isnan, ceil
from warnings import warn
import numpy as np
try:
import cupy as cp
CUPY_AVAILABLE = True
except ImportError:
cp = np
CUPY_AVAILABLE = False
import bottleneck
import numexpr
from numba import njit, prange
from .utils import Accumulator
# Device management
def to_device(array, device='cpu'):
""" Transfer array to chosen GPU, if possible.
If `cupy` is not installed, does nothing.
Parameters
----------
device : str or int
Device specifier. Can be either a string (`cpu`, `gpu:4`) or an integer (`4`).
"""
if isinstance(device, str) and ':' in device:
device = int(device.split(':')[1])
if device in ['cuda', 'gpu']:
device = 0
if isinstance(device, int):
if CUPY_AVAILABLE:
with cp.cuda.Device(device):
array = cp.asarray(array)
else:
warn('Performance Warning: computing metrics on CPU as `cupy` is not available', RuntimeWarning)
return array
def from_device(array):
""" Move the data from GPU, if needed.
If `cupy` is not installed or supplied array already resides on CPU, does nothing.
"""
if CUPY_AVAILABLE and hasattr(array, 'device'):
array = cp.asnumpy(array)
return array
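# A minimal usage sketch (illustrative names, not part of this module): round-trip an
# array through the device helpers. `to_device` falls back to a plain numpy array when
# `cupy` is missing, and `from_device` copies data back to host memory when needed.
#
#     data = np.random.rand(1024, 1024)
#     data_gpu = to_device(data, device='gpu:0')
#     ...  # compute metrics on `data_gpu`
#     data_cpu = from_device(data_gpu)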
# Functions to compute various distances between two atleast 2d arrays
def correlation(array1, array2, std1, std2, **kwargs):
""" Compute correlation. """
_ = kwargs
xp = cp.get_array_module(array1) if CUPY_AVAILABLE else np
if xp is np:
covariation = bottleneck.nanmean(numexpr.evaluate('array1 * array2'), axis=-1)
result = numexpr.evaluate('covariation / (std1 * std2)')
else:
covariation = (array1 * array2).mean(axis=-1)
result = covariation / (std1 * std2)
return result
def crosscorrelation(array1, array2, std1, std2, **kwargs):
""" Compute crosscorrelation. """
_ = std1, std2, kwargs
xp = cp.get_array_module(array1) if CUPY_AVAILABLE else np
window = array1.shape[-1]
pad_width = [(0, 0)] * (array2.ndim - 1) + [(window//2, window - window//2)]
padded = xp.pad(array2, pad_width=tuple(pad_width))
accumulator = Accumulator('argmax')
for i in range(window):
corrs = (array1 * padded[..., i:i+window]).sum(axis=-1)
accumulator.update(corrs)
return accumulator.get(final=True).astype(float) - window//2
def btch(array1, array2, std1, std2, **kwargs):
""" Compute Bhattacharyya distance. """
_ = std1, std2, kwargs
xp = cp.get_array_module(array1) if CUPY_AVAILABLE else np
return xp.sqrt(array1 * array2).sum(axis=-1)
def kl(array1, array2, std1, std2, **kwargs):
""" Compute Kullback-Leibler divergence. """
_ = std1, std2, kwargs
xp = cp.get_array_module(array1) if CUPY_AVAILABLE else np
return 1 - (array2 * xp.log2(array2 / array1)).sum(axis=-1)
def js(array1, array2, std1, std2, **kwargs):
""" Compute Janson-Shannon divergence. """
_ = std1, std2, kwargs
xp = cp.get_array_module(array1) if CUPY_AVAILABLE else np
average = (array1 + array2) / 2
log_average = xp.log2(average)
div1 = (array1 * (xp.log2(array1) - log_average)).sum(axis=-1)
div2 = (array2 * (xp.log2(array2) - log_average)).sum(axis=-1)
return 1 - (div1 + div2) / 2
def hellinger(array1, array2, std1, std2, **kwargs):
""" Compute Hellinger distance. """
_ = std1, std2, kwargs
xp = cp.get_array_module(array1) if CUPY_AVAILABLE else np
div = xp.sqrt(((xp.sqrt(array1) - xp.sqrt(array2)) ** 2).sum(axis=-1)) / xp.sqrt(2)
return 1 - div
def tv(array1, array2, std1, std2, **kwargs):
""" Compute total variation distance. """
_ = std1, std2, kwargs
xp = cp.get_array_module(array1) if CUPY_AVAILABLE else np
return 1 - xp.abs(array2 - array1).sum(axis=-1) / 2
# Helper functions
def hilbert(array, axis=-1):
""" Compute the analytic signal, using the Hilbert transform. """
xp = cp.get_array_module(array) if CUPY_AVAILABLE else np
N = array.shape[axis]
fft = xp.fft.fft(array, n=N, axis=axis)
h = xp.zeros(N)
if N % 2 == 0:
h[0] = h[N // 2] = 1
h[1:N // 2] = 2
else:
h[0] = 1
h[1:(N + 1) // 2] = 2
if array.ndim > 1:
ind = [xp.newaxis] * array.ndim
ind[axis] = slice(None)
h = h[tuple(ind)]
result = xp.fft.ifft(fft * h, axis=axis)
return result
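# Note (sketch): the magnitude of the analytic signal gives the instantaneous amplitude
# (envelope) of a trace, e.g. `envelope = np.abs(hilbert(trace))` for a 1d numpy array
# `trace` (illustrative name); `instantaneous_phase` below builds on the same transform.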
def instantaneous_phase(array, continuous=False, axis=-1):
""" Compute instantaneous phase. """
xp = cp.get_array_module(array) if CUPY_AVAILABLE else np
array = hilbert(array, axis=axis)
phase = xp.angle(array) % (2 * xp.pi) - xp.pi
if continuous:
phase = xp.abs(phase)
return phase
def make_gaussian_kernel(kernel_size=3, sigma=1.):
""" Create Gaussian kernel with given parameters: kernel size and std. """
ax = np.linspace(-(kernel_size - 1) / 2., (kernel_size - 1) / 2., kernel_size)
x_points, y_points = np.meshgrid(ax, ax)
kernel = np.exp(-0.5 * (np.square(x_points) + np.square(y_points)) / np.square(sigma))
gaussian_kernel = (kernel / np.sum(kernel).astype(np.float32))
return gaussian_kernel
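# Example (sketch): a 5x5 kernel with std 1.5; the weights are normalized to sum to ~1.
#
#     kernel = make_gaussian_kernel(kernel_size=5, sigma=1.5)
#     kernel.shape                     # (5, 5)
#     np.isclose(kernel.sum(), 1.0)    # True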
def special_convolve(matrix, mode='convolve', kernel_size=3, kernel=None, iters=1,
fill_value=None, preserve=True, margin=np.inf, **_):
""" Convolve the matrix with a given kernel.
A special treatment is given to the missing points (marked with either `fill_value` or `np.nan`),
and to areas with high variance.
Parameters
----------
matrix : ndarray
Array to smooth values in.
mode : str
If 'convolve', then use convolutions with a kernel to compute result.
Otherwise, use median values in a kernel to compute result.
kernel_size : int
If the kernel is not provided, the size of the square kernel of ones to use.
kernel : ndarray or None
Kernel to convolve with.
iters : int
Number of smoothing iterations to perform.
fill_value : number
Value to ignore in convolutions.
preserve : bool
If True, then all the missing values remain missing in the resulting array.
If False, then missing values are filled with a weighted average of nearby points.
margin : number
If the difference between the anchor point's value and the value of a point inside the filter
is greater than the margin, then that point is ignored in the convolution.
Can be used to smooth each side of a discontinuity separately.
"""
# Choose the filtering function
if mode.startswith('c'):
function = _convolve
else:
function = _medfilt
# Convert all the fill values to nans
matrix = matrix.astype(np.float32).copy()
if fill_value is not None:
matrix[matrix == fill_value] = np.nan
# Make the kernel, if needed. Pad the input
if kernel is None:
kernel = np.ones((kernel_size, kernel_size), dtype=np.float32)
kernel_size = kernel.shape[0]
result = np.pad(matrix, kernel_size, constant_values=np.nan)
# Apply smoothing multiple times. Note that there is no dtype conversion in between
for _ in range(iters):
result = function(result, kernel, preserve=preserve, margin=margin)
result = result[kernel_size:-kernel_size, kernel_size:-kernel_size]
# Remove all the unwanted values
if preserve:
result[np.isnan(matrix)] = np.nan
# Convert nans back to fill value
if fill_value is not None:
result[np.isnan(result)] = fill_value
return result
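# Usage sketch (the input name is illustrative): smooth a 2d map in which -999 marks
# missing points, running two passes of a 5x5 averaging kernel and keeping the missing
# points missing in the output (`preserve=True` is the default).
#
#     smoothed = special_convolve(depth_map, mode='convolve', kernel_size=5,
#                                 iters=2, fill_value=-999)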
@njit(parallel=True)
def _convolve(src, kernel, preserve, margin):
""" Jit-accelerated function to apply 2d convolution with special care for nan values. """
#pylint: disable=too-many-nested-blocks, consider-using-enumerate, not-an-iterable
k = int(np.floor(kernel.shape[0] / 2))
raveled_kernel = kernel.ravel() / np.sum(kernel)
i_range, x_range = src.shape
dst = src.copy()
for iline in prange(k, i_range - k):
for xline in range(k, x_range - k):
central = src[iline, xline]
if (preserve is True) and isnan(central):
continue
element = src[iline-k:iline+k+1, xline-k:xline+k+1]
s, sum_weights = np.float32(0), np.float32(0)
for item, weight in zip(element.ravel(), raveled_kernel):
if not isnan(item):
if abs(item - central) <= margin or isnan(central):
s += item * weight
sum_weights += weight
if sum_weights != 0.0:
dst[iline, xline] = s / sum_weights
return dst
@njit(parallel=True)
def _medfilt(src, kernel, preserve, margin):
""" Jit-accelerated function to apply 2d median filter with special care for nan values. """
# margin = 0: median across all non-equal-to-self elements in kernel
# margin = -1: median across all elements in kernel
#pylint: disable=too-many-nested-blocks, consider-using-enumerate, not-an-iterable
k = int(np.floor(kernel.shape[0] / 2))
i_range, x_range = src.shape
dst = src.copy()
for iline in prange(k, i_range - k):
for xline in range(k, x_range - k):
central = src[iline, xline]
if (preserve is True) and isnan(central):
continue
element = src[iline-k:iline+k+1, xline-k:xline+k+1].ravel()
# 0 for close, 1 for distant, 2 for nan
indicator = np.zeros_like(element)
for i in range(len(element)):
item = element[i]
if not isnan(item):
if (abs(item - central) > margin) or isnan(central):
indicator[i] = np.float32(1)
else:
indicator[i] = np.float32(2)
n_close = (indicator == np.float32(0)).sum()
mask_distant = indicator == np.float32(1)
n_distant = mask_distant.sum()
if n_distant > n_close:
dst[iline, xline] = np.median(element[mask_distant])
return dst
def smooth_out(matrix, mode='convolve', kernel_size=3, sigma=2.0, kernel=None, iters=1,
fill_value=None, preserve=True, margin=np.inf, **_):
""" `special_convolve` with a Gaussian kernel. """
if kernel is None:
kernel = make_gaussian_kernel(kernel_size=kernel_size, sigma=sigma)
return special_convolve(matrix, mode=mode, kernel=kernel, iters=iters,
fill_value=fill_value, preserve=preserve, margin=margin)
def digitize(matrix, quantiles):
""" Convert continious metric into binarized version with thresholds defined by `quantiles`. """
bins = np.nanquantile(matrix, np.sort(quantiles)[::-1])
if len(bins) > 1:
digitized = np.digitize(matrix, [*bins, np.nan]).astype(float)
digitized[digitized > 0] -= 1
else:
digitized = np.zeros_like(matrix, dtype=np.float64)
digitized[matrix <= bins[0]] = 1.0
digitized[np.isnan(matrix)] = np.nan
return digitized
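# Usage sketch (`metric_map` is an illustrative input): with a single quantile, values
# at or below that quantile become 1.0, the rest 0.0, and NaNs stay NaN.
#
#     binary_map = digitize(metric_map, quantiles=[0.1])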
def gridify(matrix, frequencies, iline=True, xline=True, full_lines=True):
""" Convert digitized map into grid with various frequencies corresponding to different bins. """
values = np.unique(matrix[~np.isnan(matrix)])
if len(values) != len(frequencies):
min_freq
<gh_stars>1-10
import os
import pickle
import re
import sys
list_destparam_0_cpyapi = ['sprintf', 'gets', 'fgets', '_memccpy', '_mbscpy', '_strncpy', 'wmemset', 'vasprintf', 'asprintf', 'wcsncpy', 'lstrcpy', '_wcsncpy', '_snprintf', 'memcpy', 'memmove', '_tcscpy', '_snwprintf', 'strcpy', 'CopyMemory', 'wcsncat', 'vsprintf', 'lstrcpyn', 'vsnprintf', '_mbsncat', 'wmemmove', 'memset', 'wmemcpy', 'strcat', 'fprintf', '_strncat', '_tcsncpy', '_mbsnbcpy', 'strncpy', 'strncat', 'wcscpy', 'snprintf', 'lstrcat']
list_scanf_api = ['vfscanf', 'fscanf', 'vscanf', 'scanf', 'vsscanf', 'sscanf', 'swscanf']
list_key_words = []
def del_ctrl_edge(pdg):
i = 0
while i < pdg.ecount():
if pdg.es[i]['var'] is None:
pdg.delete_edges(i)
else:
i += 1
return pdg
def get_ifname(node_id, dict_if2cfgnode, dict_cfgnode2if):
if_name = ''
min_count = 10000000
for if_n in dict_cfgnode2if[node_id]:
if len(dict_if2cfgnode[if_n]) < min_count:
min_count = len(dict_if2cfgnode[if_n])
if_name = if_n
else:
continue
return if_name
def isFuncCall(node):
result = getCalleeName(node)
if result != []:
return result
else:
return False
def getCalleeName(slicenode):
# Get the names of the functions called in this node's code
code = slicenode['code']
if slicenode['type'] == "Function":
return []
pattern = "((?:_|[A-Za-z])\w*(?:\s(?:\.|::|\->|)\s(?:_|[A-Za-z])\w*)*)\s\("
result = re.findall(pattern, code)
i = 0
while i < len(result):
if result[i] in list_key_words:
del result[i]
else:
i += 1
return result  # list of callee function names
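# Worked example (sketch) of the pattern above, assuming the space-separated token style
# used in this repository's PDG node `code` fields:
#     on a node whose code is "dst = strcpy ( dst , src )" the result is ['strcpy'];
#     on a node whose code is "obj . method ( x )" the result is ['obj . method'].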
def getFuncPDGBynodeIDAndtestID(list_cfgNodeID_funcID, testID):
_dict = {}
working_directory = directory + "pdg_db"
for _tuple in list_cfgNodeID_funcID:
cfgNodeID = _tuple[0]
func_id = _tuple[1]
path = os.path.join(working_directory, testID)
for _file in os.listdir(path):
if _file.split('_')[-1] == func_id:
fpath = os.path.join(path, _file)
fin = open(fpath, 'rb')
pdg = pickle.load(fin)
_dict[cfgNodeID] = pdg
fin.close()
break
return _dict
def getFuncPDGBynodeIDAndtestID_noctrl(list_cfgNodeID_funcID, testID):
_dict = {}
working_directory = directory + "pdg_db/"
for _tuple in list_cfgNodeID_funcID:
cfgNodeID = _tuple[0]
func_id = _tuple[1]
for _dir in os.listdir(working_directory):
list_testid = os.listdir(os.path.join(working_directory, _dir))
if testID not in list_testid:
continue
else:
path = os.path.join(working_directory, _dir, testID)
for _file in os.listdir(path):
if _file.split('_')[-1] == func_id:
fpath = os.path.join(path, _file)
fin = open(fpath, 'rb')
pdg = pickle.load(fin)
_dict[cfgNodeID] = pdg
fin.close()
break
return _dict
def getFuncPDGByfuncIDAndtestID(func_ID, testID):
working_directory = directory + "pdg_db"
path = os.path.join(working_directory, testID)
pdg = False
for _file in os.listdir(path):
if _file.split('_')[-1] == str(func_ID):
fpath = os.path.join(path, _file)
fin = open(fpath, 'rb')
pdg = pickle.load(fin)
fin.close()
break
return pdg
def getFuncPDGByfuncIDAndtestID_noctrl(func_ID, testID, _type):
pdg = False
working_directory = directory + "pdg_db/"
for _dir in os.listdir(working_directory):
list_testid = os.listdir(os.path.join(working_directory, _dir))
if testID not in list_testid:
continue
else:
path = os.path.join(working_directory, _dir, testID)
for _file in os.listdir(path):
if _file.split('_')[-1] == str(func_ID):
fpath = os.path.join(path, _file)
fin = open(fpath, 'rb')
pdg = pickle.load(fin)
fin.close()
break
return pdg
def getReturnVarOfAPI(code):
for api in list_destparam_0_cpyapi:
if code.find(api + ' ') != -1:
_list = code.split(api + ' ')
if _list[0] == '' and _list[1][0] == '(':
var = _list[1].split(',')[0].replace('(', '').strip()
if var.find(' & ') > -1:
var = var.split(' & ')[1]
if var.find(' + ') != -1:
var = var.split(' + ')[0]
if var.find(' . ') != -1:
_list = [var]
var_1 = []
while var.find(' . ') != -1:
var_1.append(var.split(' . ')[0])
_list.append(' . '.join(var_1))
var = ' . '.join(var.split(' . ')[1:])
return _list
elif var.find(' -> ') != -1:
_list = [var]
var_1 = []
while var.find(' -> ') != -1:
var_1.append(var.split(' -> ')[0])
_list.append(' -> '.join(var_1))
var = ' -> '.join(var.split(' -> ')[1:])
return _list
else:
return [var]
elif var.find(' - ') != -1:
var = var.split(' - ')[0]
if var.find(' . ') != -1:
_list = [var]
var_1 = []
while var.find(' . ') != -1:
var_1.append(var.split(' . ')[0])
_list.append(' . '.join(var_1))
var = ' . '.join(var.split(' . ')[1:])
return _list
elif var.find(' -> ') != -1:
_list = [var]
var_1 = []
while var.find(' -> ') != -1:
var_1.append(var.split(' -> ')[0])
_list.append(' -> '.join(var_1))
var = ' -> '.join(var.split(' -> ')[1:])
return _list
else:
return [var]
elif var.find(' * ') != -1:
temp = var.split(' * ')[1]
if temp[0] == ')':
var = temp[1:].strip()
else:
var = var.split(' * ')[0]
if var.find(' . ') != -1:
_list = [var]
var_1 = []
while var.find(' . ') != -1:
var_1.append(var.split(' . ')[0])
_list.append(' . '.join(var_1))
var = ' . '.join(var.split(' . ')[1:])
return _list
elif var.find(' -> ') != -1:
_list = [var]
var_1 = []
while var.find(' -> ') != -1:
var_1.append(var.split(' -> ')[0])
_list.append(' -> '.join(var_1))
var = ' -> '.join(var.split(' -> ')[1:])
return _list
else:
return [var]
elif var.find(' . ') != -1:
_list = [var]
var_1 = []
while var.find(' . ') != -1:
var_1.append(var.split(' . ')[0])
_list.append(' . '.join(var_1))
var = ' . '.join(var.split(' . ')[1:])
return _list
elif var.find(' -> ') != -1:
_list = [var]
var_1 = []
while var.find(' -> ') != -1:
var_1.append(var.split(' -> ')[0])
_list.append(' -> '.join(var_1))
var = ' -> '.join(var.split(' -> ')[1:])
return _list
else:
return [var]
else:
continue
for scanfapi in list_scanf_api:
if scanfapi in ['fscanf', 'sscanf', 'swscanf', 'vfscanf', 'vsscanf']:
if code.find(scanfapi + ' ') != -1:
_list = code.split(scanfapi+' ')
if _list[0] == '' and _list[1][0] == '(':
list_var = _list[1].split(',')[2:]
list_var = [var.replace('(', '').strip() for var in list_var]
new_list_var = []
for var in list_var:
if var.find(' & ') > -1:
var = var.split(' & ')[1]
if var.find(' + ') > -1:
var = var.split(' + ')[0]
if var.find(' . ') != -1:
_list = [var]
var_1 = []
while var.find(' . ') != -1:
var_1.append(var.split(' . ')[0])
_list.append(' . '.join(var_1))
var = ' . '.join(var.split(' . ')[1:])
new_list_var += _list
elif var.find(' -> ') != -1:
_list = [var]
var_1 = []
while var.find(' -> ') != -1:
var_1.append(var.split(' -> ')[0])
_list.append(' -> '.join(var_1))
var = ' -> '.join(var.split(' -> ')[1:])
new_list_var += _list
else:
new_list_var.append(var)
elif var.find(' - ') != -1:
var = var.split(' - ')[0]
if var.find(' . ') != -1:
_list = [var]
var_1 = []
while var.find(' . ') != -1:
var_1.append(var.split(' . ')[0])
_list.append(' . '.join(var_1))
var = ' . '.join(var.split(' . ')[1:])
new_list_var += _list
elif var.find(' -> ') != -1:
_list = [var]
var_1 = []
while var.find(' -> ') != -1:
var_1.append(var.split(' -> ')[0])
_list.append(' -> '.join(var_1))
var = ' -> '.join(var.split(' -> ')[1:])
new_list_var += _list
else:
new_list_var.append(var)
elif var.find(' * ') != -1:
temp = var.split(' * ')[1]
if temp[0] == ')':
var = temp[1:].strip()
else:
var = var.split(' * ')[0]
if var.find(' . ') != -1:
_list = [var]
var_1 = []
while var.find(' . ') != -1:
var_1.append(var.split(' . ')[0])
_list.append(' . '.join(var_1))
var = ' . '.join(var.split(' . ')[1:])
new_list_var += _list
elif var.find(' -> ') != -1:
_list = [var]
var_1 = []
while var.find(' -> ') != -1:
var_1.append(var.split(' -> ')[0])
_list.append(' -> '.join(var_1))
var = ' -> '.join(var.split(' -> ')[1:])
new_list_var += _list
else:
new_list_var.append(var)
elif var.find(' . ') != -1:
_list = [var]
var_1 = []
while var.find(' . ') != -1:
var_1.append(var.split(' . ')[0])
_list.append(' . '.join(var_1))
var = ' . '.join(var.split(' . ')[1:])
new_list_var += _list
elif var.find(' -> ') != -1:
_list = [var]
var_1 = []
while var.find(' -> ') != -1:
var_1.append(var.split(' -> ')[0])
_list.append(' -> '.join(var_1))
var = ' -> '.join(var.split(' -> ')[1:])
new_list_var += _list
else:
new_list_var.append(var)
return new_list_var
elif scanfapi in ['scanf', 'vscanf']:
if code.find(scanfapi) != -1:
_list = code.split(scanfapi + ' ')
if _list[0] == '' and _list[1][0] == '(':
list_var = _list[1].split(',')[1:]
list_var = [var.replace('(', '').strip() for var in list_var]
new_list_var = []
for var in list_var:
if var.find(' & ') > -1:
var = var.split(' & ')[1]
if var.find(' + ') != -1:
var = var.split(' + ')[0]
if var.find(' . ') != -1:
_list = [var]
var_1 = []
while var.find(' . ') != -1:
var_1.append(var.split(' . ')[0])
_list.append(' . '.join(var_1))
var = ' . '.join(var.split(' . ')[1:])
new_list_var += _list
else:
new_list_var.append(var)
elif var.find(' - ') != -1:
var = var.split(' - ')[0]
if var.find(' . ') != -1:
_list = [var]
var_1 = []
while var.find(' . ') != -1:
var_1.append(var.split(' . ')[0])
_list.append(' . '.join(var_1))
var = ' . '.join(var.split(' . ')[1:])
new_list_var += _list
else:
new_list_var.append(var)
elif var.find(' * ') != -1:
temp = var.split(' * ')[1]
if temp[0] == ')':
var = temp[1:].strip()
else:
| |
= False
it_since_step_all[~complete] = it_since_step + 1
if it > 0:
batch_complete = ((diff < tol) & ((alp_diff < tol) | (alp_new < tol))) | xp.isnan(diff) | step_stop
# batch_complete = ((diff < tol) & (alp_diff < tol)) | xp.isnan(diff) | step_stop
# ignore stepsize if last values met tol (so var_beta is consistent)
if xp.any(batch_complete):
batch_complete_all = xp.full_like(complete, False)
batch_complete_all[~complete] = batch_complete
beta_k_all[batch_complete_all] = beta_new[batch_complete]
alp_k_all[batch_complete_all] = alp_new[batch_complete]
complete[~complete] = batch_complete
# print("pirls", it, xp.sum(complete), xp.sum(~complete))
pbar.set_postfix_str(
"iter="
+ str(it)
+ ", med_err/tol="
+ str(xp.round(xp.median(diff) / tol, 2))
+ ", med_alp_err/tol="
+ str(xp.round(xp.median(alp_diff) / tol, 2)),
)
pbar.update(xp.to_cpu(xp.sum(complete)) - pbar.n)
it = it + 1
if xp.all(complete) | (it > max_it):
pbar.close()
if ret_beta:
return mu_k_all, alp_k_all, beta_k_all, var_beta_k_all
else:
if bootstrap:
# pylint: disable=import-outside-toplevel
logging.warning("Testing GAM bootstrap, this is currently broken and unstable")
# var_beta_DP = make_DP(var_beta_k_all)
var_beta_DP = var_beta_k_all
for k in range(100):
if link == "g":
continue
i = 50 + k
var_beta = var_beta_DP[i]
beta = beta_k_all[i]
lam = alp_k_all[i]
bs_beta_k = [
beta,
]
bs_var_beta_k = [
var_beta,
]
bs_lam_k = [
lam,
]
for _ in range(20):
with warnings.catch_warnings():
warnings.simplefilter(action="ignore", category=FutureWarning)
beta_rand = xp.random.multivariate_normal( # pylint: disable=unexpected-keyword-arg
beta,
var_beta,
method="svd",
)
lp_rand = xp.einsum("ij,j->i", x[0], beta_rand)
_, bs_lam, bs_beta, bs_var_beta = PIRLS(
x=x_all[i][None, :],
y=lp_rand[None, :],
alp=alp,
pen=pen_all[i][None, ...],
tol=tol,
dist=dist,
max_it=max_it,
w=w,
gamma=gamma,
tqdm_label="BS",
ret_beta=True,
)
_, _, bs_beta, bs_var_beta = PIRLS(
x=x_all[i][None, :],
y=y_all[i][None, ...],
alp=bs_lam,
pen=pen_all[i][None, ...],
tol=tol,
dist=dist,
max_it=max_it,
w=w,
gamma=gamma,
tqdm_label="BS",
ret_beta=True,
fixed_lam=True,
)
# bs_var_beta = make_DP(bs_var_beta)
bs_beta_k.append(bs_beta[0])
bs_var_beta_k.append(bs_var_beta[0])
bs_lam_k.append(bs_lam[0])
beta = bs_beta[0]
var_beta = bs_var_beta[0]
lam = bs_lam[0]
lp_rands = []
for _ in range(1000):
j = xp.random.randint(20)
with warnings.catch_warnings():
warnings.simplefilter(action="ignore", category=FutureWarning)
beta_rand = xp.random.multivariate_normal( # pylint: disable=unexpected-keyword-arg
bs_beta_k[int(j)],
bs_var_beta_k[int(j)],
method="svd",
)
lp_rand = xp.einsum("ij,j->i", x[0], beta_rand)
lp_rands.append(lp_rand)
lp_rand = xp.stack(lp_rands)
a = xp.quantile(lp_rand, (0.25, 0.75), axis=0)
b = xp.quantile(lp_rand, (0.025, 0.975), axis=0)
import matplotlib.pyplot as plt
plt.plot(xp.to_cpu(lp_rand.T), alpha=0.1)
plt.plot(xp.to_cpu(a.T), color="red")
plt.plot(xp.to_cpu(b.T), color="red")
plt.plot(xp.to_cpu(lp_k_all[i]), linewidth=3, color="black")
plt.show()
return mu_k_all
def ridge(x, y, alp=0.0):
"""Calculate the exact soln to the ridge regression of the weights for basis x that fit batched data y."""
xtx = xp.einsum("ijk,ijl->ikl", x, x)
t1 = xp.linalg.inv(alp * xp.tile(xp.identity(xtx.shape[1]), (xtx.shape[0], 1, 1)) + xtx)
t2 = xp.einsum("ijk,ij->ik", x, y)
w = xp.einsum("ijk,ij->ik", t1, t2)
return w
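# The function above evaluates the closed-form ridge/normal-equation solution
# w = (alp * I + X^T X)^{-1} X^T y independently for every batch entry via einsum.
# A hypothetical call (shapes are illustrative): x of shape (batch, n_points, n_basis)
# and y of shape (batch, n_points) give w of shape (batch, n_basis),
# e.g. `w = ridge(x, y, alp=1e-2)`.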
@sync_numerical_libs
def lin_reg(y, x=None, alp=0.0, quad=False, return_fit=True):
"""Calculate exact soln for batched linear regression and return either weights or fitted values."""
if x is None:
x = xp.arange(y.shape[1], dtype=float)
x = xp.tile(x, (y.shape[0], 1))
basis_list = [xp.ones_like(x), x]
if quad:
basis_list.append(x**2)
basis = xp.stack(basis_list, axis=1).swapaxes(1, 2)
w = ridge(basis, y, alp)
if return_fit:
ret = xp.sum((w[:, None, :] * basis), axis=-1)
return ret
else:
return w
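# Usage sketch (shapes illustrative): fit an intercept and slope to every row of a
# batched series `y` of shape (batch, n_t).
#
#     y_hat = lin_reg(y, alp=0.1)                        # fitted values, (batch, n_t)
#     coeffs = lin_reg(y, quad=True, return_fit=False)   # (batch, 3): [b0, b1, b2]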
@sync_numerical_libs
def logistic_fit(y, x_out, x=None, alp=0.6, t0_max=200, L=None):
"""WIP Fit a logistic function to batched y."""
# TODO this is WIP
if x is None:
x = xp.arange(y.shape[1], dtype=float)
x = xp.tile(x, (y.shape[0], 1))
slopes = xp.gradient(y, axis=1)
ratio = slopes / y
w = lin_reg(ratio, xp.array(y), alp=alp)
k = w[:, 0]
if L is None:
L = -k / w[:, 1]
else:
L = xp.full_like(k, L)
test = L[:, None, None] / (
1.0 + xp.exp(-k[:, None, None] * (xp.arange(y.shape[1])[None, :] - xp.arange(t0_max)[:, None])[None, :, :])
)
err = xp.nansum((test - y[:, None, :]) ** 2.0, axis=-1)
t0 = xp.argmin(err, axis=-1)
y_out = L[:, None] / (1.0 + xp.exp(-k[:, None] * (x_out - t0[:, None])))
return y_out
# @memory.cache
def opt_lam(x, y, alp=0.6, pen=None, min_lam=0.1, step_size=None, tol=1e-3, max_it=100, gamma=1.0, fixed_lam=False):
"""Calculate the exact soln to the ridge regression of the weights for basis x that fit data y."""
xtx_all = xp.einsum("ijk,ijl->ikl", x, x)
if "ndarray" in str(type(alp)):
lam_all = alp.copy()
else:
lam_all = xp.full((x.shape[0],), alp)
if pen is None:
raise NotImplementedError
# d = xp.ones(x.shape[-1])
# d[0] = 0.0
# d[1] = 0.0
# pen_mat_all = xp.tile(xp.diag(d), (xtx.shape[0], 1, 1))
else:
pen_mat_all = xp.pad(pen, ((0, 0), (2, 0), (2, 0)))
if step_size is None:
step_size = xp.ones_like(lam_all)
q_all = xp.empty_like(x)
r_all = xp.empty((x.shape[0], x.shape[-1], x.shape[-1]))
with warnings.catch_warnings():
warnings.simplefilter(action="ignore", category=FutureWarning)
q_all, r_all = xp.linalg.qr(x.astype(xp.float64))
complete = xp.full((y.shape[0],), False, dtype=bool)
Vg_out = xp.empty((y.shape[0],))
y_out = xp.empty(y.shape)
beta_out = xp.empty((x.shape[0], x.shape[2]))
var_beta_out = xp.empty((x.shape[0], x.shape[2], x.shape[2]))
x_in = x.copy()
y_in = y.copy()
it = 0
while True:
lam = lam_all[~complete]
pen_mat = pen_mat_all[~complete]
q = q_all[~complete]
r = r_all[~complete]
xtx = xtx_all[~complete]
x = x_in[~complete]
y = y_in[~complete]
s = (min_lam + lam[..., None, None]) * pen_mat
t1 = xp.linalg.inv(xtx + s)
# t2 = xp.einsum("ijk,ij->ik", x, y)
# w = xp.einsum("ijk,ij->ik", t1, t2)
a = xp.einsum("ijk,ikl,iml->ijm", x, t1, x)
s_DP = make_DP(s.astype(xp.float64))
b = xp.linalg.cholesky(s_DP)
if xp.any(xp.isnan(b)):
logging.error("batch cholsky hit nan")
raise ValueError
# from IPython import embed
# embed()
aug = xp.hstack((r, b))
# NB: cupy's batched svd is giving incorrect results for float64?
u = xp.empty((aug.shape[0], aug.shape[1], aug.shape[1]))
d_diag = xp.empty((aug.shape[0], aug.shape[2]))
vt = xp.empty((aug.shape[0], aug.shape[2], aug.shape[2]))
u, d_diag, vt = xp.linalg.svd(aug.astype(xp.float64))
# eps = xp.finfo(x.dtype).eps
# check D isn't rank deficient here
# if xp.any(d_diag < (d_diag[:, 0] * xp.sqrt(eps))[..., None]):
# # TODO if they are we can remove them but for now just throw an err
# raise ValueError
u1 = u[:, : r.shape[1], : r.shape[2]]
trA = xp.einsum("bij,bij->b", u1, u1)
y1 = xp.einsum("bji,bkj,bk->bi", u1, q, y)
invd_diag = 1.0 / d_diag
m = invd_diag[:, None, :] * (vt @ s @ xp.swapaxes(vt, 1, 2)) * invd_diag[:, :, None]
k = xp.einsum("bij,bkj,bkl->bil", m, u1, u1)
y1t = y1[:, None, :]
dalpdrho = 2.0 * lam[..., None, None] * (y1t @ m @ y1[..., None] - y1t @ k @ y1[..., None])
d2alpdrho = (
2.0
* lam[..., None, None]
* lam[..., None, None]
* y1t
@ (2.0 * m @ k - 2.0 * m @ m + k @ m)
@ y1[..., None]
+ dalpdrho
)
n = x.shape[1]
dtrAdrho = xp.einsum("b,bii->b", -lam, k)
d2trAd2rho = 2.0 * xp.einsum("b,b,bii->b", lam, lam, m @ k) + dtrAdrho
ddeltadrho = -gamma * dtrAdrho
d2deltad2rho = -gamma * d2trAd2rho # todo double check
delta = n - gamma * trA
fitted_y = xp.einsum("bij,bj->bi", a, y)
alpha = xp.sum((y - fitted_y) ** 2.0, axis=-1)
Vg = n * alpha / delta / delta
dVgdrho = n / delta / delta * dalpdrho[:, 0, 0] - 2.0 * n * alpha / delta / delta / delta * ddeltadrho
d2Vgd2rho = (
-2.0 * n / delta / delta / delta * ddeltadrho * dalpdrho[:, 0, 0]
+ n / delta / delta * d2alpdrho[:, 0, 0]
- 2.0 * n / delta / delta / delta * dalpdrho[:, 0, 0] * ddeltadrho
+ 6.0 * n * alpha / (delta**4) * ddeltadrho * ddeltadrho
- 2.0 * n * alpha / (delta**3) * d2deltad2rho
)
rho = xp.log(lam)
drho = dVgdrho / d2Vgd2rho
nanmask = xp.isnan(drho)
drho[nanmask] = 0.0
drho = xp.clip(drho, a_min=-2.0, a_max=2.0)
new_rho = rho - drho
y_out[~complete] = fitted_y
lam_all[~complete] = xp.exp(new_rho)
beta_out[~complete] = ((xp.swapaxes(vt, 1, 2) * invd_diag[:, None]) @ y1[..., None])[:, :, 0]
Vg_out[~complete] = Vg
var_beta_out[~complete] = xp.einsum(
"bij,bj,bjk->bik",
xp.swapaxes(vt, 1, 2),
invd_diag**2,
vt,
) # TODO double check this
if (it > 0) or fixed_lam:
batch_complete = xp.abs(drho / rho) < tol
complete[~complete] = batch_complete
if fixed_lam:
complete = xp.full_like(complete, True)
if xp.sum(~complete) < 1:
break
it += 1
if it >= max_it:
break
return y_out, beta_out, var_beta_out, lam_all, Vg_out
# @memory.cache(ignore=["label"])
@sync_numerical_libs
def fit(
y,
x=None,
df=10,
alp=2.0,
dist="g",
standardize=True,
w=None,
gamma=1.4,
tol=1.0e-7,
clip=(None, None),
label="fit",
bootstrap=False,
):
"""Perform fit of natural cubic splines to the vector y, return the smoothed y."""
# TODO handle df and alp as vectors
# standardize inputs
if x is None:
x = xp.arange(0, y.shape[1])
x = xp.tile(x, (y.shape[0], 1))
if standardize:
if dist == "g":
y_mean = xp.mean(y, axis=1, keepdims=True)
y_var = xp.var(y, axis=1, keepdims=True)
# y_range = xp.max(y, axis=1, keepdims=True) - xp.min(y, axis=1, keepdims=True)
y_in
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the XML-format help generated by the flags.py module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import os
import string
import sys
import xml.dom.minidom
import xml.sax.saxutils
from absl import flags
from absl.flags import _helpers
from absl.flags.tests import module_bar
from absl.testing import absltest
import six
class CreateXMLDOMElement(absltest.TestCase):
def _check(self, name, value, expected_output):
doc = xml.dom.minidom.Document()
node = _helpers.create_xml_dom_element(doc, name, value)
output = node.toprettyxml(' ', encoding='utf-8')
self.assertEqual(expected_output, output)
def test_create_xml_dom_element(self):
self._check('tag', '', b'<tag></tag>\n')
self._check('tag', 'plain text', b'<tag>plain text</tag>\n')
self._check('tag', '(x < y) && (a >= b)',
b'<tag>(x &lt; y) &amp;&amp; (a &gt;= b)</tag>\n')
# If the value is bytes with invalid unicode:
bytes_with_invalid_unicodes = b'\x81\xff'
if six.PY2:
# In python 2 the string representation is invalid unicode so they are
# stripped.
self._check('tag', bytes_with_invalid_unicodes, b'<tag></tag>\n')
else:
# In python 3 the string representation is "b'\x81\xff'" so they are kept
# as "b'\x81\xff'".
self._check('tag', bytes_with_invalid_unicodes,
b"<tag>b'\\x81\\xff'</tag>\n")
# Some unicode chars are illegal in xml
# (http://www.w3.org/TR/REC-xml/#charsets):
self._check('tag', u'\x0b\x02\x08\ufffe', b'<tag></tag>\n')
# Valid unicode will be encoded:
self._check('tag', u'\xff', b'<tag>\xc3\xbf</tag>\n')
def _list_separators_in_xmlformat(separators, indent=''):
"""Generates XML encoding of a list of list separators.
Args:
separators: A list of list separators. Usually, this should be a
string whose characters are the valid list separators, e.g., ', '
means that both comma (',') and space (' ') are valid list
separators.
indent: A string that is added at the beginning of each generated
XML element.
Returns:
A string.
"""
result = ''
separators = list(separators)
separators.sort()
for sep_char in separators:
result += ('%s<list_separator>%s</list_separator>\n' %
(indent, repr(sep_char)))
return result
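# For example (illustrative call), _list_separators_in_xmlformat(', ', indent='  ')
# returns the two lines
#   <list_separator>' '</list_separator>
#   <list_separator>','</list_separator>
# since the separators are sorted and each one is rendered through repr().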
class FlagCreateXMLDOMElement(absltest.TestCase):
"""Test the create_xml_dom_element method for a single flag at a time.
There is one test* method for each kind of DEFINE_* declaration.
"""
def setUp(self):
# self.fv is a FlagValues object, just like flags.FLAGS. Each
# test registers one flag with this FlagValues.
self.fv = flags.FlagValues()
def _check_flag_help_in_xml(self, flag_name, module_name,
expected_output, is_key=False):
flag_obj = self.fv[flag_name]
doc = xml.dom.minidom.Document()
element = flag_obj._create_xml_dom_element(doc, module_name, is_key=is_key)
output = element.toprettyxml(indent=' ')
self.assertMultiLineEqual(expected_output, output)
def test_flag_help_in_xml_int(self):
flags.DEFINE_integer('index', 17, 'An integer flag', flag_values=self.fv)
expected_output_pattern = (
'<flag>\n'
' <file>module.name</file>\n'
' <name>index</name>\n'
' <meaning>An integer flag</meaning>\n'
' <default>17</default>\n'
' <current>%d</current>\n'
' <type>int</type>\n'
'</flag>\n')
self._check_flag_help_in_xml('index', 'module.name',
expected_output_pattern % 17)
# Check that the output is correct even when the current value of
# a flag is different from the default one.
self.fv['index'].value = 20
self._check_flag_help_in_xml('index', 'module.name',
expected_output_pattern % 20)
def test_flag_help_in_xml_int_with_bounds(self):
flags.DEFINE_integer('nb_iters', 17, 'An integer flag',
lower_bound=5, upper_bound=27,
flag_values=self.fv)
expected_output = (
'<flag>\n'
' <key>yes</key>\n'
' <file>module.name</file>\n'
' <name>nb_iters</name>\n'
' <meaning>An integer flag</meaning>\n'
' <default>17</default>\n'
' <current>17</current>\n'
' <type>int</type>\n'
' <lower_bound>5</lower_bound>\n'
' <upper_bound>27</upper_bound>\n'
'</flag>\n')
self._check_flag_help_in_xml('nb_iters', 'module.name', expected_output,
is_key=True)
def test_flag_help_in_xml_string(self):
flags.DEFINE_string('file_path', '/path/to/my/dir', 'A test string flag.',
flag_values=self.fv)
expected_output = (
'<flag>\n'
' <file>simple_module</file>\n'
' <name>file_path</name>\n'
' <meaning>A test string flag.</meaning>\n'
' <default>/path/to/my/dir</default>\n'
' <current>/path/to/my/dir</current>\n'
' <type>string</type>\n'
'</flag>\n')
self._check_flag_help_in_xml('file_path', 'simple_module', expected_output)
def test_flag_help_in_xml_string_with_xmlillegal_chars(self):
flags.DEFINE_string('file_path', '/path/to/\x08my/dir',
'A test string flag.', flag_values=self.fv)
# '\x08' is not a legal character in XML 1.0 documents. Our
# current code purges such characters from the generated XML.
expected_output = (
'<flag>\n'
' <file>simple_module</file>\n'
' <name>file_path</name>\n'
' <meaning>A test string flag.</meaning>\n'
' <default>/path/to/my/dir</default>\n'
' <current>/path/to/my/dir</current>\n'
' <type>string</type>\n'
'</flag>\n')
self._check_flag_help_in_xml('file_path', 'simple_module', expected_output)
def test_flag_help_in_xml_boolean(self):
flags.DEFINE_boolean('use_gpu', False, 'Use gpu for performance.',
flag_values=self.fv)
expected_output = (
'<flag>\n'
' <key>yes</key>\n'
' <file>a_module</file>\n'
' <name>use_gpu</name>\n'
' <meaning>Use gpu for performance.</meaning>\n'
' <default>false</default>\n'
' <current>false</current>\n'
' <type>bool</type>\n'
'</flag>\n')
self._check_flag_help_in_xml('use_gpu', 'a_module', expected_output,
is_key=True)
def test_flag_help_in_xml_enum(self):
flags.DEFINE_enum('cc_version', 'stable', ['stable', 'experimental'],
'Compiler version to use.', flag_values=self.fv)
expected_output = (
'<flag>\n'
' <file>tool</file>\n'
' <name>cc_version</name>\n'
' <meaning>&lt;stable|experimental&gt;: '
'Compiler version to use.</meaning>\n'
' <default>stable</default>\n'
' <current>stable</current>\n'
' <type>string enum</type>\n'
' <enum_value>stable</enum_value>\n'
' <enum_value>experimental</enum_value>\n'
'</flag>\n')
self._check_flag_help_in_xml('cc_version', 'tool', expected_output)
def test_flag_help_in_xml_comma_separated_list(self):
flags.DEFINE_list('files', 'a.cc,a.h,archive/old.zip',
'Files to process.', flag_values=self.fv)
expected_output = (
'<flag>\n'
' <file>tool</file>\n'
' <name>files</name>\n'
' <meaning>Files to process.</meaning>\n'
' <default>a.cc,a.h,archive/old.zip</default>\n'
' <current>[\'a.cc\', \'a.h\', \'archive/old.zip\']</current>\n'
' <type>comma separated list of strings</type>\n'
' <list_separator>\',\'</list_separator>\n'
'</flag>\n')
self._check_flag_help_in_xml('files', 'tool', expected_output)
def test_list_as_default_argument_comma_separated_list(self):
flags.DEFINE_list('allow_users', ['alice', 'bob'],
'Users with access.', flag_values=self.fv)
expected_output = (
'<flag>\n'
' <file>tool</file>\n'
' <name>allow_users</name>\n'
' <meaning>Users with access.</meaning>\n'
' <default>alice,bob</default>\n'
' <current>[\'alice\', \'bob\']</current>\n'
' <type>comma separated list of strings</type>\n'
' <list_separator>\',\'</list_separator>\n'
'</flag>\n')
self._check_flag_help_in_xml('allow_users', 'tool', expected_output)
def test_none_as_default_arguments_comma_separated_list(self):
flags.DEFINE_list('allow_users', None,
'Users with access.', flag_values=self.fv)
expected_output = (
'<flag>\n'
' <file>tool</file>\n'
' <name>allow_users</name>\n'
' <meaning>Users with access.</meaning>\n'
' <default></default>\n'
' <current>None</current>\n'
' <type>comma separated list of strings</type>\n'
' <list_separator>\',\'</list_separator>\n'
'</flag>\n')
self._check_flag_help_in_xml('allow_users', 'tool', expected_output)
def test_flag_help_in_xml_space_separated_list(self):
flags.DEFINE_spaceseplist('dirs', 'src libs bin',
'Directories to search.', flag_values=self.fv)
expected_separators = sorted(string.whitespace)
expected_output = (
'<flag>\n'
' <file>tool</file>\n'
' <name>dirs</name>\n'
' <meaning>Directories to search.</meaning>\n'
' <default>src libs bin</default>\n'
' <current>[\'src\', \'libs\', \'bin\']</current>\n'
' <type>whitespace separated list of strings</type>\n'
'LIST_SEPARATORS'
'</flag>\n').replace('LIST_SEPARATORS',
_list_separators_in_xmlformat(expected_separators,
indent=' '))
self._check_flag_help_in_xml('dirs', 'tool', expected_output)
def test_flag_help_in_xml_space_separated_list_with_comma_compat(self):
flags.DEFINE_spaceseplist('dirs', 'src libs,bin',
'Directories to search.', comma_compat=True,
flag_values=self.fv)
expected_separators = sorted(string.whitespace + ',')
expected_output = (
'<flag>\n'
' <file>tool</file>\n'
' <name>dirs</name>\n'
' <meaning>Directories to search.</meaning>\n'
' <default>src libs bin</default>\n'
' <current>[\'src\', \'libs\', \'bin\']</current>\n'
' <type>whitespace or comma separated list of strings</type>\n'
'LIST_SEPARATORS'
'</flag>\n').replace('LIST_SEPARATORS',
_list_separators_in_xmlformat(expected_separators,
indent=' '))
self._check_flag_help_in_xml('dirs', 'tool', expected_output)
def test_flag_help_in_xml_multi_string(self):
flags.DEFINE_multi_string('to_delete', ['a.cc', 'b.h'],
'Files to delete', flag_values=self.fv)
expected_output = (
'<flag>\n'
' <file>tool</file>\n'
' <name>to_delete</name>\n'
' <meaning>Files to delete;\n'
' repeat this option to specify a list of values</meaning>\n'
' <default>[\'a.cc\', \'b.h\']</default>\n'
' <current>[\'a.cc\', \'b.h\']</current>\n'
' <type>multi string</type>\n'
'</flag>\n')
self._check_flag_help_in_xml('to_delete', 'tool', expected_output)
def test_flag_help_in_xml_multi_int(self):
flags.DEFINE_multi_integer('cols', [5, 7, 23],
'Columns to select', flag_values=self.fv)
expected_output = (
'<flag>\n'
' <file>tool</file>\n'
' <name>cols</name>\n'
' <meaning>Columns to select;\n '
'repeat this option to specify a list of values</meaning>\n'
' <default>[5, 7, 23]</default>\n'
' <current>[5, 7, 23]</current>\n'
' <type>multi int</type>\n'
'</flag>\n')
self._check_flag_help_in_xml('cols', 'tool', expected_output)
def test_flag_help_in_xml_multi_enum(self):
flags.DEFINE_multi_enum('flavours', ['APPLE', 'BANANA'],
['APPLE', 'BANANA', 'CHERRY'],
'Compilation flavour.', flag_values=self.fv)
expected_output = (
'<flag>\n'
' <file>tool</file>\n'
' <name>flavours</name>\n'
' <meaning>Compilation flavour.;\n'
' repeat this option to specify a list of values</meaning>\n'
' <default>[\'APPLE\', \'BANANA\']</default>\n'
' <current>[\'APPLE\', \'BANANA\']</current>\n'
' <type>multi string enum</type>\n'
' <enum_value>APPLE</enum_value>\n'
' <enum_value>BANANA</enum_value>\n'
' <enum_value>CHERRY</enum_value>\n'
'</flag>\n')
self._check_flag_help_in_xml('flavours', 'tool', expected_output)
# The next EXPECTED_HELP_XML_* constants are parts of a template for
# the expected XML output from WriteHelpInXMLFormatTest below. When
# we assemble these parts into a single big string, we'll take into
# account the ordering between the name of the main module and the
# name of module_bar. Next, we'll fill in the docstring for this
# module (%(usage_doc)s), the name of the main module
# (%(main_module_name)s) and the name of the module module_bar
# (%(module_bar_name)s). See WriteHelpInXMLFormatTest below.
EXPECTED_HELP_XML_START = """\
<?xml version="1.0" encoding="utf-8"?>
<AllFlags>
<program>%(basename_of_argv0)s</program>
<usage>%(usage_doc)s</usage>
"""
EXPECTED_HELP_XML_FOR_FLAGS_FROM_MAIN_MODULE = """\
<flag>
<key>yes</key>
<file>%(main_module_name)s</file>
<name>allow_users</name>
<meaning>Users with access.</meaning>
<default>alice,bob</default>
<current>['alice', 'bob']</current>
<type>comma separated list of strings</type>
<list_separator>','</list_separator>
</flag>
<flag>
<key>yes</key>
<file>%(main_module_name)s</file>
<name>cc_version</name>
<meaning>&lt;stable|experimental&gt;: Compiler version to use.</meaning>
<default>stable</default>
<current>stable</current>
<type>string enum</type>
<enum_value>stable</enum_value>
<enum_value>experimental</enum_value>
</flag>
<flag>
<key>yes</key>
<file>%(main_module_name)s</file>
<name>cols</name>
<meaning>Columns to select;
repeat this option to specify a list of values</meaning>
<default>[5, 7, 23]</default>
<current>[5, 7, 23]</current>
<type>multi int</type>
</flag>
<flag>
<key>yes</key>
<file>%(main_module_name)s</file>
<name>dirs</name>
<meaning>Directories to create.</meaning>
<default>src libs bins</default>
<current>['src', 'libs', 'bins']</current>
<type>whitespace separated list of strings</type>
%(whitespace_separators)s </flag>
<flag>
<key>yes</key>
<file>%(main_module_name)s</file>
<name>file_path</name>
<meaning>A test string flag.</meaning>
<default>/path/to/my/dir</default>
<current>/path/to/my/dir</current>
<type>string</type>
</flag>
<flag>
<key>yes</key>
<file>%(main_module_name)s</file>
<name>files</name>
<meaning>Files to process.</meaning>
<default>a.cc,a.h,archive/old.zip</default>
<current>['a.cc', 'a.h', 'archive/old.zip']</current>
<type>comma separated list of strings</type>
<list_separator>\',\'</list_separator>
</flag>
<flag>
<key>yes</key>
<file>%(main_module_name)s</file>
<name>flavours</name>
<meaning>Compilation flavour.;
repeat this option to specify a list of values</meaning>
<default>['APPLE', 'BANANA']</default>
<current>['APPLE', 'BANANA']</current>
<type>multi string enum</type>
<enum_value>APPLE</enum_value>
<enum_value>BANANA</enum_value>
<enum_value>CHERRY</enum_value>
</flag>
<flag>
<key>yes</key>
<file>%(main_module_name)s</file>
<name>index</name>
<meaning>An integer flag</meaning>
<default>17</default>
<current>17</current>
<type>int</type>
</flag>
<flag>
<key>yes</key>
<file>%(main_module_name)s</file>
<name>nb_iters</name>
<meaning>An integer flag</meaning>
<default>17</default>
<current>17</current>
<type>int</type>
<lower_bound>5</lower_bound>
<upper_bound>27</upper_bound>
</flag>
<flag>
<key>yes</key>
<file>%(main_module_name)s</file>
<name>to_delete</name>
<meaning>Files to delete;
repeat this option to specify a list of values</meaning>
<default>['a.cc', 'b.h']</default>
<current>['a.cc', 'b.h']</current>
<type>multi string</type>
</flag>
<flag>
<key>yes</key>
<file>%(main_module_name)s</file>
<name>use_gpu</name>
<meaning>Use gpu for performance.</meaning>
<reponame>abosoar/camel_tools
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2020 New York University <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""The morphological analyzer component of CAMeL Tools.
"""
from __future__ import absolute_import
from collections import deque, namedtuple
import copy
import itertools
import re
from threading import RLock
from cachetools import LFUCache, cached
from camel_tools.utils.charsets import UNICODE_PUNCT_SYMBOL_CHARSET
from camel_tools.utils.charsets import AR_CHARSET, AR_DIAC_CHARSET
from camel_tools.utils.charmap import CharMapper
from camel_tools.morphology.database import MorphologyDB
from camel_tools.morphology.errors import AnalyzerError
from camel_tools.morphology.utils import merge_features
from camel_tools.morphology.utils import simple_ar_to_caphi
from camel_tools.utils.dediac import dediac_ar
_ALL_PUNC = u''.join(UNICODE_PUNCT_SYMBOL_CHARSET)
_DIAC_RE = re.compile(u'[' + re.escape(u''.join(AR_DIAC_CHARSET)) + u']')
_IS_DIGIT_RE = re.compile(u'^.*[0-9\u0660-\u0669]+.*$')
_IS_STRICT_DIGIT_RE = re.compile(u'^[0-9\u0660-\u0669]+$')
_IS_PUNC_RE = re.compile(u'^[' + re.escape(_ALL_PUNC) + u']+$')
_HAS_PUNC_RE = re.compile(u'[' + re.escape(_ALL_PUNC) + u']')
_IS_AR_RE = re.compile(u'^[' + re.escape(u''.join(AR_CHARSET)) + u']+$')
# Identify No Analysis marker
_NOAN_RE = re.compile(u'NOAN')
_COPY_FEATS = frozenset(['gloss', 'atbtok', 'atbseg', 'd1tok', 'd1seg',
'd2tok', 'd2seg', 'd3tok', 'd3seg', 'bwtok'])
_UNDEFINED_LEX_FEATS = frozenset(['root', 'pattern', 'caphi'])
DEFAULT_NORMALIZE_MAP = CharMapper({
u'\u0625': u'\u0627',
u'\u0623': u'\u0627',
u'\u0622': u'\u0627',
u'\u0671': u'\u0627',
u'\u0649': u'\u064a',
u'\u0629': u'\u0647',
u'\u0640': u''
})
""":obj:`~camel_tools.utils.charmap.CharMapper`: The default character map used
for normalization by :obj:`Analyzer`.
Removes the tatweel/kashida character and does the following conversions:
- 'إ' to 'ا'
- 'أ' to 'ا'
- 'آ' to 'ا'
- 'ٱ' to 'ا'
- 'ى' to 'ي'
- 'ة' to 'ه'
"""
_BACKOFF_TYPES = frozenset(['NONE', 'NOAN_ALL', 'NOAN_PROP', 'ADD_ALL',
'ADD_PROP'])
class AnalyzedWord(namedtuple('AnalyzedWord', ['word', 'analyses'])):
"""A named tuple containing a word and its analyses.
Attributes:
word (:obj:`str`): The analyzed word.
analyses (:obj:`list` of :obj:`dict`): List of analyses for **word**.
See :doc:`/reference/camel_morphology_features` for more
information on features and their values.
"""
def _is_digit(word):
return _IS_DIGIT_RE.match(word) is not None
def _is_strict_digit(word):
return _IS_STRICT_DIGIT_RE.match(word) is not None
def _is_punc(word):
return _IS_PUNC_RE.match(word) is not None
def _has_punc(word):
return _HAS_PUNC_RE.search(word) is not None
def _is_ar(word):
return _IS_AR_RE.match(word) is not None
def _segments_gen(word, max_prefix=1, max_suffix=1):
w = len(word)
for p in range(0, min(max_prefix, w - 1) + 1):
prefix = word[:p]
for s in range(max(1, w - p - max_suffix), w - p + 1):
stem = word[p:p+s]
suffix = word[p+s:]
yield (prefix, stem, suffix)
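# Example (sketch): with the defaults max_prefix=1 and max_suffix=1, _segments_gen('abc')
# yields ('', 'ab', 'c'), ('', 'abc', ''), ('a', 'b', 'c') and ('a', 'bc', ''), i.e. every
# prefix/stem/suffix split whose affixes are at most one character long.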
class Analyzer:
"""Morphological analyzer component.
Args:
db (:obj:`~camel_tools.morphology.database.MorphologyDB`): Database to
use for analysis. Must be opened in analysis or reinflection mode.
backoff (:obj:`str`, optional): Backoff mode. Can be one of the
following: 'NONE', 'NOAN_ALL', 'NOAN_PROP', 'ADD_ALL', or
'ADD_PROP'. Defaults to 'NONE'.
norm_map (:obj:`~camel_tools.utils.charmap.CharMapper`, optional):
Character map for normalizing input words. If set to None, then
:const:`DEFAULT_NORMALIZE_MAP` is used.
Defaults to None.
strict_digit (:obj:`bool`, optional): If set to `True`, then only words
completely comprised of digits are considered numbers, otherwise,
all words containing a digit are considered numbers. Defaults to
`False`.
cache_size (:obj:`int`, optional): If greater than zero, then the
            analyzer will cache the analyses for the **cache_size** most
frequent words, otherwise no analyses will be cached.
Raises:
        :obj:`~camel_tools.morphology.errors.AnalyzerError`: If **db** is
            not an instance of
            :obj:`~camel_tools.morphology.database.MorphologyDB`, if **db**
does not support analysis, or if **backoff** is not a valid backoff
mode.
"""
def __init__(self, db, backoff='NONE',
norm_map=None,
strict_digit=False,
cache_size=0):
if not isinstance(db, MorphologyDB):
raise AnalyzerError('DB is not an instance of MorphologyDB')
if not db.flags.analysis:
raise AnalyzerError('DB does not support analysis')
self._db = db
self._backoff = backoff
self._strict_digit = strict_digit
if norm_map is None:
self._norm_map = DEFAULT_NORMALIZE_MAP
else:
self._norm_map = norm_map
if backoff in _BACKOFF_TYPES:
if backoff == 'NONE':
self._backoff_condition = None
self._backoff_action = None
else:
backoff_toks = backoff.split('_')
self._backoff_condition = backoff_toks[0]
self._backoff_action = backoff_toks[1]
else:
raise AnalyzerError('Invalid backoff mode {}'.format(
repr(backoff)))
if isinstance(cache_size, int):
if cache_size > 0:
cache = LFUCache(cache_size)
self.analyze = cached(cache, lock=RLock())(self.analyze)
else:
raise AnalyzerError('Invalid cache size {}'.format(
repr(cache_size)))
def _normalize(self, word):
if self._norm_map is None:
return word
return self._norm_map.map_string(word)
def _combined_analyses(self,
word_dediac,
prefix_analyses,
stem_analyses,
suffix_analyses):
combined = deque()
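        # A prefix/stem/suffix candidate is kept only if all three pairwise
        # compatibility tables (prefix-stem, stem-suffix, and prefix-suffix)
        # allow the combination; a merged analysis whose dediacritized form
        # differs from the input word is additionally marked as a spelling
        # variant ('spvar').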
for p in itertools.product(prefix_analyses, stem_analyses):
prefix_cat = p[0][0]
prefix_feats = p[0][1]
stem_cat = p[1][0]
stem_feats = p[1][1]
if stem_cat in self._db.prefix_stem_compat[prefix_cat]:
for suffix_cat, suffix_feats in suffix_analyses:
if ((stem_cat not in self._db.stem_suffix_compat) or
(prefix_cat not in self._db.prefix_suffix_compat) or
(suffix_cat not in
self._db.stem_suffix_compat[stem_cat]) or
(suffix_cat not in
self._db.prefix_suffix_compat[prefix_cat])):
continue
merged = merge_features(self._db, prefix_feats, stem_feats,
suffix_feats)
merged['stem'] = stem_feats['diac']
merged['stemcat'] = stem_cat
merged_dediac = dediac_ar(merged['diac'])
if word_dediac.replace(u'\u0640', '') != merged_dediac:
merged['source'] = 'spvar'
combined.append(merged)
return combined
def _combined_backoff_analyses(self,
stem,
word_dediac,
prefix_analyses,
stem_analyses,
suffix_analyses):
combined = deque()
for p in itertools.product(prefix_analyses, stem_analyses):
prefix_cat = p[0][0]
prefix_feats = p[0][1]
stem_cat = p[1][0]
stem_feats = copy.copy(p[1][1])
if stem_cat in self._db.prefix_stem_compat[prefix_cat]:
for suffix_cat, suffix_feats in suffix_analyses:
if ((suffix_cat not in
self._db.stem_suffix_compat[stem_cat]) or
(prefix_cat not in self._db.prefix_suffix_compat or
suffix_cat not in
self._db.prefix_suffix_compat[prefix_cat])):
continue
if (self._backoff_action == 'PROP' and
'NOUN_PROP' not in stem_feats['bw']):
continue
stem_feats['bw'] = _NOAN_RE.sub(stem, stem_feats['bw'])
stem_feats['diac'] = _NOAN_RE.sub(stem, stem_feats['diac'])
stem_feats['lex'] = _NOAN_RE.sub(stem, stem_feats['lex'])
stem_feats['caphi'] = simple_ar_to_caphi(stem)
merged = merge_features(self._db, prefix_feats, stem_feats,
suffix_feats)
merged['stem'] = stem_feats['diac']
merged['stemcat'] = stem_cat
merged['source'] = 'backoff'
merged['gloss'] = stem_feats['gloss']
combined.append(merged)
return combined
# pylint: disable=E0202
def analyze(self, word):
"""Analyze a given word.
Args:
word (:py:obj:`str`): Word to analyze.
Returns:
:obj:`list` of :obj:`dict`: The list of analyses for **word**.
See :doc:`/reference/camel_morphology_features` for more
information on features and their values.
"""
word = word.strip()
if word == '':
return []
analyses = deque()
word_dediac = dediac_ar(word)
word_normal = self._normalize(word_dediac)
if ((self._strict_digit and _is_strict_digit(word)) or
(not self._strict_digit and _is_digit(word))):
result = copy.copy(self._db.defaults['digit'])
result['diac'] = word
result['stem'] = word
result['stemgloss'] = word
result['stemcat'] = None
result['lex'] = word + '_0'
result['bw'] = word + '/NOUN_NUM'
result['source'] = 'digit'
for feat in _COPY_FEATS:
if feat in self._db.defines:
result[feat] = word
for feat in _UNDEFINED_LEX_FEATS:
if feat in self._db.defines:
result[feat] = 'DIGIT'
if 'catib6' in self._db.defines:
result['catib6'] = 'NOM'
if 'ud' in self._db.defines:
result['ud'] = 'NUM'
result['pos_logprob'] = -99.0
result['lex_logprob'] = -99.0
result['pos_lex_logprob'] = -99.0
return [result]
elif _is_punc(word):
result = copy.copy(self._db.defaults['punc'])
result['diac'] = word
result['stem'] = word
result['stemgloss'] = word
result['stemcat'] = None
result['lex'] = word + '_0'
result['bw'] = word + '/PUNC'
result['source'] = 'punc'
for feat in _COPY_FEATS:
if feat in self._db.defines:
result[feat] = word
for feat in _UNDEFINED_LEX_FEATS:
if feat in self._db.defines:
result[feat] = 'PUNC'
if 'catib6' in self._db.defines:
result['catib6'] = 'PNX'
if 'ud' in self._db.defines:
result['ud'] = 'PUNCT'
result['pos_logprob'] = -99.0
result['lex_logprob'] = -99.0
result['pos_lex_logprob'] = -99.0
return [result]
elif _has_punc(word):
pass
elif not _is_ar(word):
result = copy.copy(self._db.defaults['noun'])
result['diac'] = word
result['stem'] = word
result['stemgloss'] = word
result['stemcat'] = None
result['lex'] = word + '_0'
result['bw'] = word + '/FOREIGN'
result['source'] = 'foreign'
for feat in _COPY_FEATS:
if feat in self._db.defines:
result[feat] = word
for feat in _UNDEFINED_LEX_FEATS:
if feat in self._db.defines:
result[feat] = 'FOREIGN'
if 'catib6' in self._db.defines:
result['catib6'] = 'FOREIGN'
if 'ud' in self._db.defines:
result['ud'] = 'X'
result['pos_logprob'] = -99.0
result['lex_logprob'] = -99.0
result['pos_lex_logprob'] = -99.0
return [result]
else:
segments_gen = _segments_gen(word_normal, self._db.max_prefix_size,
self._db.max_suffix_size)
for segmentation in segments_gen:
prefix = segmentation[0]
stem = segmentation[1]
suffix = segmentation[2]
prefix_analyses = self._db.prefix_hash.get(prefix, None)
suffix_analyses = self._db.suffix_hash.get(suffix, None)
if prefix_analyses is None or suffix_analyses is None:
continue
stem_analyses = self._db.stem_hash.get(stem, None)
if stem_analyses is not None:
combined = self._combined_analyses(word_dediac,
prefix_analyses,
stem_analyses,
suffix_analyses)
analyses.extend(combined)
if ((self._backoff_condition == 'NOAN' and len(analyses) == 0) or
(self._backoff_condition == 'ADD')):
segments_gen = _segments_gen(word_normal,
self._db.max_prefix_size,
self._db.max_suffix_size)
backoff_cats = self._db.stem_backoffs[self._backoff_action]
stem_analyses = [(cat, analysis)
for cat, analysis in self._db.stem_hash['NOAN']
if cat in backoff_cats]
for segmentation in segments_gen:
prefix = segmentation[0]
stem = segmentation[1]
suffix = segmentation[2]
prefix_analyses = self._db.prefix_hash.get(prefix, None)
suffix_analyses = self._db.suffix_hash.get(suffix, None)
                if prefix_analyses is None
"""
Generates plots / figures when run as a script.
Plot files are placed in the :file:`plots` directory.
By default, simply running ``python -m src.plots`` generates **ALL** plots,
which may not be desired. Instead, one can pass a list of plots to generate:
``python -m src.plots plot1 plot2 ...``. The full list of plots is shown in
the usage information ``python -m src.plots --help``.
Typing can be reduced by using shell brace expansion, e.g. ``python -m
src.plots observables_{design,posterior}`` for both ``observables_design`` and
``observables_posterior``. In addition, plots may be given as paths to plot
filenames, which enables shell globbing, e.g. ``python -m src.plots
plots/observables_*``.
In the code, each plot is generated by a function tagged with the ``@plot``
decorator.
"""
from collections import OrderedDict
import itertools
import logging
from pathlib import Path
import subprocess
import tempfile
import warnings
import h5py
import hsluv
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import lines
from matplotlib import patches
from matplotlib import ticker
from scipy import special
from scipy.interpolate import PchipInterpolator
from sklearn.decomposition import PCA
from sklearn.gaussian_process import GaussianProcessRegressor as GPR
from sklearn.gaussian_process import kernels
from . import workdir, systems, parse_system, expt, model, mcmc
#from .design import Design
#from .emulator import emulators
fontsize = dict(
large=11,
normal=10,
small=9,
tiny=8,
)
# new tableau colors
# https://www.tableau.com/about/blog/2016/7/colors-upgrade-tableau-10-56782
colors = OrderedDict([
('blue', '#4e79a7'),
('orange', '#f28e2b'),
('green', '#59a14f'),
('red', '#e15759'),
('cyan', '#76b7b2'),
('purple', '#b07aa1'),
('brown', '#9c755f'),
('yellow', '#edc948'),
('pink', '#ff9da7'),
('gray', '#bab0ac')
])
offblack = '.15'
plt.rcdefaults()
plt.rcParams.update({
'font.family': 'sans-serif',
'font.sans-serif': ['Lato'],
'mathtext.fontset': 'custom',
'mathtext.default': 'it',
'mathtext.rm': 'sans',
'mathtext.it': 'sans:italic:medium',
'mathtext.cal': 'sans',
'font.size': fontsize['normal'],
'legend.fontsize': fontsize['normal'],
'axes.labelsize': fontsize['normal'],
'axes.titlesize': fontsize['large'],
'xtick.labelsize': fontsize['small'],
'ytick.labelsize': fontsize['small'],
#'font.weight': 400,
#'axes.labelweight': 400,
#'axes.titleweight': 400,
'axes.prop_cycle': plt.cycler('color', list(colors.values())),
'lines.linewidth': .8,
'lines.markersize': 3,
'lines.markeredgewidth': 0,
'patch.linewidth': .8,
'axes.linewidth': .6,
'xtick.major.width': .6,
'ytick.major.width': .6,
'xtick.minor.width': .4,
'ytick.minor.width': .4,
'xtick.major.size': 3.,
'ytick.major.size': 3.,
'xtick.minor.size': 2.,
'ytick.minor.size': 2.,
'xtick.major.pad': 3.5,
'ytick.major.pad': 3.5,
'axes.labelpad': 4.,
'axes.formatter.limits': (-5, 5),
'axes.spines.top': False,
'axes.spines.right': False,
'text.color': offblack,
'axes.edgecolor': offblack,
'axes.labelcolor': offblack,
'xtick.color': offblack,
'ytick.color': offblack,
'legend.frameon': False,
'image.cmap': 'Blues',
'image.interpolation': 'none',
})
plotdir = workdir / 'plots'
plotdir.mkdir(exist_ok=True)
plot_functions = {}
def plot(f):
"""
Plot function decorator. Calls the function, does several generic tasks,
and saves the figure as the function name.
"""
def wrapper(*args, **kwargs):
logging.info('generating plot: %s', f.__name__)
f(*args, **kwargs)
fig = plt.gcf()
if not fig.get_tight_layout():
set_tight(fig)
plotfile = plotdir / '{}.pdf'.format(f.__name__)
fig.savefig(str(plotfile))
logging.info('wrote %s', plotfile)
plt.close(fig)
plot_functions[f.__name__] = wrapper
return wrapper
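# A minimal usage sketch (hypothetical plot name): any function decorated with
# @plot is registered in plot_functions and, when called, has its figure saved
# as plots/<function name>.pdf.
#
#   @plot
#   def example_plot():
#       fig, ax = plt.subplots(figsize=figsize(.5))
#       ax.plot([0, 1], [0, 1])
#
# Running ``python -m src.plots example_plot`` would then generate the plot,
# assuming the module's command-line dispatcher looks names up in
# plot_functions.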
def figsize(relwidth=1, aspect=.618, refwidth=6):
"""
Return figure dimensions from a relative width (to a reference width) and
aspect ratio (default: 1/golden ratio).
"""
width = relwidth * refwidth
return width, width*aspect
def set_tight(fig=None, **kwargs):
"""
Set tight_layout with a better default pad.
"""
if fig is None:
fig = plt.gcf()
kwargs.setdefault('pad', .1)
fig.set_tight_layout(kwargs)
def auto_ticks(ax, axis='both', minor=False, **kwargs):
"""
Convenient interface to matplotlib.ticker locators.
"""
axis_list = []
if axis in {'x', 'both'}:
axis_list.append(ax.xaxis)
if axis in {'y', 'both'}:
axis_list.append(ax.yaxis)
for axis in axis_list:
axis.get_major_locator().set_params(**kwargs)
if minor:
axis.set_minor_locator(ticker.AutoMinorLocator(minor))
def format_system(system):
"""
Format a system string into a display name, e.g.:
>>> format_system('PbPb2760')
'Pb-Pb 2.76 TeV'
>>> format_system('AuAu200')
'Au-Au 200 GeV'
"""
proj, energy = parse_system(system)
if energy > 1000:
energy /= 1000
prefix = 'T'
else:
prefix = 'G'
return '{} {} {}eV'.format('-'.join(proj), energy, prefix)
def darken(rgb, amount=.5):
"""
Darken a color by the given amount in HSLuv space.
"""
H, S, L = hsluv.rgb_to_hsluv(rgb)
return hsluv.hsluv_to_rgb((H, S, (1 - amount)*L))
def obs_color_hsluv(obs, subobs):
"""
Return a nice color for the given observable in HSLuv space.
Use obs_color() to obtain an RGB color.
"""
if obs in {'dNch_deta', 'pT_fluct'}:
return 250, 90, 55
if obs == 'dET_deta':
return 10, 65, 55
if obs in {'dN_dy', 'mean_pT'}:
return dict(
pion=(210, 85, 70),
kaon=(130, 88, 68),
proton=(30, 90, 62),
)[subobs]
if obs == 'vnk':
return {
(2, 2): (230, 90, 65),
(2, 4): (262, 80, 63),
(3, 2): (150, 90, 67),
(4, 2): (310, 70, 50),
}[subobs]
raise ValueError('unknown observable: {} {}'.format(obs, subobs))
def obs_color(obs, subobs):
"""
Return a nice color for the given observable.
"""
return hsluv.hsluv_to_rgb(obs_color_hsluv(obs, subobs))
def obs_label(obs, subobs, differentials=False, full_cumulants=False):
"""
Return a formatted label for the given observable.
"""
if obs.startswith('d') and obs.endswith('_deta'):
return (r'$d{}/d\eta$' if differentials else '${}$').format(
{'Nch': r'N_\mathrm{ch}', 'ET': r'E_T'}[obs[1:-5]])
id_parts_labels = {
'dN_dy': '$dN_{}/dy$' if differentials else '$N_{}$',
'mean_pT': r'$\langle p_T^{} \rangle$'
}
if obs in id_parts_labels:
return id_parts_labels[obs].format(
{'pion': '\pi', 'kaon': 'K', 'proton': 'p'}[subobs]
)
if obs == 'pT_fluct':
return r'$\delta p_T/\langle p_T \rangle$'
if obs == 'vnk':
n, k = subobs
return '$v_{}{}$'.format(
n,
(r'\{' + str(k) + r'\}') if full_cumulants else ''
)
def _observables_plots():
"""
Metadata for observables plots.
"""
def id_parts_plots(obs):
return [(obs, species, dict(label=label)) for species, label in [
('pion', '$\pi$'), ('kaon', '$K$'), ('proton', '$p$')
]]
return [
dict(
title='Yields',
ylabel=(
r'$dN_\mathrm{ch}/d\eta,\ dN/dy,\ dE_T/d\eta\ [\mathrm{GeV}]$'
),
ylim=(1, 1e5),
yscale='log',
height_ratio=1.5,
subplots=[
('dNch_deta', None, dict(label=r'$N_\mathrm{ch}$', scale=25)),
('dET_deta', None, dict(label=r'$E_T$', scale=5)),
*id_parts_plots('dN_dy')
]
),
dict(
title='Mean $p_T$',
ylabel=r'$\langle p_T \rangle$ [GeV]',
ylim=(0, 1.7),
subplots=id_parts_plots('mean_pT')
),
dict(
title='Mean $p_T$ fluctuations',
ylabel=r'$\delta p_T/\langle p_T \rangle$',
ylim=(0, .04),
subplots=[('pT_fluct', None, dict())]
),
dict(
title='Flow cumulants',
ylabel=r'$v_n\{2\}$',
ylim=(0, .12),
subplots=[
('vnk', (n, 2), dict(label='$v_{}$'.format(n)))
for n in [2, 3, 4]
]
)
]
def _observables(posterior=False):
"""
Model observables at all design points or drawn from the posterior with
experimental data points.
"""
plots = _observables_plots()
fig, axes = plt.subplots(
nrows=len(plots), ncols=len(systems),
figsize=figsize(1.1, aspect=1.25),
gridspec_kw=dict(
height_ratios=[p.get('height_ratio', 1) for p in plots]
)
)
if posterior:
samples = mcmc.Chain().samples(100)
for (plot, system), ax in zip(
itertools.product(plots, systems), axes.flat
):
for obs, subobs, opts in plot['subplots']:
color = obs_color(obs, subobs)
scale = opts.get('scale')
x = model.data[system][obs][subobs]['x']
Y = (
samples[system][obs][subobs]
if posterior else
model.data[system][obs][subobs]['Y']
)
if scale is not None:
Y = Y*scale
for y in Y:
ax.plot(x, y, color=color, alpha=.08, lw=.3)
if 'label' in opts:
ax.text(
x[-1] + 3,
np.median(Y[:, -1]),
opts['label'],
color=darken(color), ha='left', va='center'
)
try:
dset = expt.data[system][obs][subobs]
except KeyError:
continue
x = dset['x']
y = dset['y']
yerr = np.sqrt(sum(
e**2 for e in dset['yerr'].values()
))
if scale is not None:
y = y*scale
yerr = yerr*scale
ax.errorbar(
x, y, yerr=yerr, fmt='o',
capsize=0, color='.25', zorder=1000
)
if plot.get('yscale') == 'log':
ax.set_yscale('log')
ax.minorticks_off()
else:
auto_ticks(ax, 'y', nbins=4, minor=2)
ax.set_xlim(0, 80)
auto_ticks(ax, 'x', nbins=5, minor=2)
ax.set_ylim(plot['ylim'])
if ax.is_first_row():
ax.set_title(format_system(system))
elif ax.is_last_row():
ax.set_xlabel('Centrality %')
if ax.is_first_col():
ax.set_ylabel(plot['ylabel'])
if ax.is_last_col():
ax.text(
1.02, .5, plot['title'],
transform=ax.transAxes, ha='left', va='center',
size=plt.rcParams['axes.labelsize'], rotation=-90
)
set_tight(fig, rect=[0, 0, .97, 1])
@plot
def observables_design():
_observables(posterior=False)
@plot
def observables_posterior():
_observables(posterior=True)
@plot
def observables_map():
"""
Model observables and ratio to experiment at the maximum a posteriori
(MAP) estimate.
"""
plots = _observables_plots()
ylim = {
'Yields': (2, 1e5),
'Flow cumulants': (0, .15),
'Mean $p_T$': (0, 1.7),
'Mean $p_T$ fluctuations': (0, .045),
}
for n, p in enumerate(plots):
p['ylim'] = ylim[p['title']]
if p['title'] == 'Flow cumulants':
move_index = n
p.update(
ylabel=r'$v_n\{k\}$',
subplots=[
('vnk', nk, dict(label='$v_{}\{{{}\}}$'.format(*nk)))
for nk in [(2, 2), (2, 4), (3, 2), (4, 2)]
],
legend=True
)
plots.insert(1, plots.pop(move_index))
ncols = int(len(plots)/2)
fig, axes = plt.subplots(
nrows=4, ncols=ncols,
figsize=figsize(1.1, aspect=2/ncols),
gridspec_kw=dict(
height_ratios=list(itertools.chain.from_iterable(
(p.get('height_ratio', 1), .4) for p in plots[::ncols]
))
)
)
labels = {}
handles = dict(expt={}, model={})
for plot, ax, ratio_ax in zip(plots, axes[::2].flat, axes[1::2].flat):
for system, (obs, subobs, opts) in itertools.product(
systems, plot['subplots']
):
color = obs_color(obs, subobs)
scale = opts.get('scale')
linestyle, fill_markers = {
'PbPb2760': ('solid', True),
'PbPb5020': ('dashed', False),
}[system]
x = model.map_data[system][obs][subobs]['x']
y = model.map_data[system][obs][subobs]['Y']
if scale is not None:
y = y*scale
ax.plot(x, y, color=color, ls=linestyle)
handles['model'][system] = \
lines.Line2D([], [], color=offblack, ls=linestyle)
if 'label' in opts and (obs, subobs) not in labels:
labels[obs, subobs] = ax.text(
x[-1] + 3, y[-1],
opts['label'],
color=darken(color), ha='left', va='center'
)
try:
dset = expt.data[system][obs][subobs]
except KeyError:
continue
x = dset['x']
yexp = dset['y']
yerr = dset['yerr']
yerrstat = yerr.get('stat')
yerrsys = yerr.get('sys', yerr.get('sum'))
if scale is not None:
yexp = yexp*scale
if yerrstat is not None:
yerrstat = yerrstat*scale
if yerrsys is not None:
yerrsys = yerrsys*scale
c = '.25'
handles['expt'][system] = ax.errorbar(
x, yexp, yerr=yerrstat, fmt='o',
capsize=0, color=c,
mec=c, mfc=(c if fill_markers else '.9'),
mew=((.2 if fill_markers else .5) *
plt.rcParams['lines.linewidth']),
zorder=1000
)
ax.fill_between(
x, yexp - yerrsys, yexp + yerrsys,
facecolor='.9', zorder=-10,
)
ratio_ax.plot(x, y/yexp, color=color, ls=linestyle)
if plot.get('yscale') == 'log':
ax.set_yscale('log')
ax.minorticks_off()
else:
auto_ticks(ax, 'y', nbins=4, minor=2)
        for
for all in k6_beschreibung:
x = eval("self.cb_k6_" + all)
x.setChecked(False)
def btn_k7_pressed(self):
if self.cb_k7_dr.isChecked() == False:
for all in k7_beschreibung:
x = eval("self.cb_k7_" + all)
x.setChecked(True)
elif self.cb_k7_dr.isChecked() == True:
for all in k7_beschreibung:
x = eval("self.cb_k7_" + all)
x.setChecked(False)
def btn_k8_pressed(self):
if self.cb_k8_ddg.isChecked() == False:
for all in k8_beschreibung:
x = eval("self.cb_k8_" + all)
x.setChecked(True)
elif self.cb_k8_ddg.isChecked() == True:
for all in k8_beschreibung:
x = eval("self.cb_k8_" + all)
x.setChecked(False)
def btn_ag_all_pressed(self):
if self.cb_ag11.isChecked() == False:
for all in ag_beschreibung:
x = eval("self.cb_" + all)
x.setChecked(True)
elif self.cb_ag11.isChecked() == True:
for all in ag_beschreibung:
x = eval("self.cb_" + all)
x.setChecked(False)
def btn_an_all_pressed(self):
if self.cb_an11.isChecked() == False:
for all in an_beschreibung:
x = eval("self.cb_" + all)
x.setChecked(True)
elif self.cb_an11.isChecked() == True:
for all in an_beschreibung:
x = eval("self.cb_" + all)
x.setChecked(False)
def btn_fa_all_pressed(self):
if self.cb_fa11.isChecked() == False:
for all in fa_beschreibung:
x = eval("self.cb_" + all)
x.setChecked(True)
elif self.cb_fa11.isChecked() == True:
for all in fa_beschreibung:
x = eval("self.cb_" + all)
x.setChecked(False)
def btn_ws_all_pressed(self):
if self.cb_ws11.isChecked() == False:
for all in ws_beschreibung:
x = eval("self.cb_" + all)
x.setChecked(True)
elif self.cb_ws11.isChecked() == True:
for all in ws_beschreibung:
x = eval("self.cb_" + all)
x.setChecked(False)
# def cb_checked(self):
# set_chosen_gk=set([])
def cb_checked(self):
chosen_gk = []
list_gk = ["ag", "fa", "an", "ws"]
for thema in list_gk:
exec("set_chosen_gk_%s=set([])" % thema)
for all in eval("%s_beschreibung" % thema):
x = eval("self.cb_" + all)
if x.isChecked() == True:
eval("set_chosen_gk_%s.add(x.text())" % thema)
eval("chosen_gk.extend(sorted(set_chosen_gk_%s))" % thema)
x = ", ".join(chosen_gk)
self.label_gk.setText(_translate("MainWindow", str(x), None))
def comboBox_pruefungstyp_changed(self):
if (
self.comboBox_pruefungstyp.currentText() == "Grundkompetenzcheck"
or self.comboBox_pruefungstyp.currentText() == "Übungsblatt"
):
self.radioButton_beurteilungsraster.setEnabled(False)
self.radioButton_notenschl.setEnabled(False)
self.groupBox_notenschl.setEnabled(False)
self.groupBox_beurteilungsra.setEnabled(False)
self.pushButton_titlepage.setEnabled(False)
else:
self.radioButton_beurteilungsraster.setEnabled(True)
self.radioButton_notenschl.setEnabled(True)
self.groupBox_notenschl.setEnabled(True)
self.groupBox_beurteilungsra.setEnabled(True)
self.pushButton_titlepage.setEnabled(True)
def cb_rest_checked(self):
set_chosen_gk = set([])
for all in k5_beschreibung:
x = eval("self.cb_k5_" + all)
if x.isChecked() == True:
set_chosen_gk.add(all.upper() + "(5)")
for all in k6_beschreibung:
x = eval("self.cb_k6_" + all)
if x.isChecked() == True:
set_chosen_gk.add(all.upper() + "(6)")
for all in k7_beschreibung:
x = eval("self.cb_k7_" + all)
if x.isChecked() == True:
set_chosen_gk.add(all.upper() + "(7)")
for all in k8_beschreibung:
x = eval("self.cb_k8_" + all)
if x.isChecked() == True:
set_chosen_gk.add(all.upper() + "(8)")
if len(set_chosen_gk) > 6:
x = ", ".join(list(sorted(set_chosen_gk))[:6])
x = x + ", ..."
else:
x = ", ".join(sorted(set_chosen_gk))
if len(set_chosen_gk) > 0:
x = "Weitere: " + x
self.label_gk_rest.setText(_translate("MainWindow", str(x), None))
############################################################################
############################################################################
######### Button REFRESH DATABASE ######################################
############################################################################
def modification_date(self, filename):
t = os.path.getmtime(filename)
return datetime.datetime.fromtimestamp(t)
def refresh_ddb(self):
msg = QtWidgets.QMessageBox()
msg.setWindowIcon(QtGui.QIcon(logo_path))
msg.setWindowTitle("Refresh Database")
msg.setStandardButtons(QtWidgets.QMessageBox.NoButton)
msg.setText("Datenbank wird aktualisiert. Bitte warten...")
msg.show()
QApplication.processEvents()
QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
for selected_aufgabentyp in [1, 2]:
beispieldaten_dateipfad = {}
beispieldaten = []
chosen_aufgabenformat = "Typ%sAufgaben" % selected_aufgabentyp
########################################################
            ##### Search for official examples ####################
##################################################
for root, dirs, files in os.walk(
os.path.join(path_programm, "_database", chosen_aufgabenformat)
):
for all in files:
if all.endswith(".tex") or all.endswith(".ltx"):
if not ("Gesamtdokument" in all) and not (
"Teildokument" in all
):
file = open(os.path.join(root, all), encoding="utf8")
for i, line in enumerate(file):
if not line == "\n":
beispieldaten_dateipfad[line] = os.path.join(
root, all
)
beispieldaten.append(line)
break
file.close()
################################################
            #### Search for unofficial examples ######
#############################################
for root, dirs, files in os.walk(
os.path.join(
path_programm, "_database_inoffiziell", chosen_aufgabenformat
)
):
for all in files:
if all.endswith(".tex") or all.endswith(".ltx"):
if not ("Gesamtdokument" in all) and not (
"Teildokument" in all
):
file = open(os.path.join(root, all), encoding="utf8")
for i, line in enumerate(file):
if not line == "\n":
beispieldaten_dateipfad[line] = os.path.join(
root, all
)
beispieldaten.append(line)
break
file.close()
temp_dict_beispieldaten = {}
temp_list = list(beispieldaten_dateipfad.keys())
temp_list.sort(key=natural_keys)
for all in temp_list:
temp_dict_beispieldaten.update({all: beispieldaten_dateipfad[all]})
beispieldaten_dateipfad = temp_dict_beispieldaten
log_file = os.path.join(
path_programm, "Teildokument", "log_file_%s" % selected_aufgabentyp
)
try:
with open(log_file, "w+", encoding="utf8") as f:
json.dump(beispieldaten_dateipfad, f, ensure_ascii=False)
except FileNotFoundError:
os.makedirs(os.path.join(path_programm, "Teildokument"))
with open(log_file, "w+", encoding="utf8") as f:
json.dump(beispieldaten_dateipfad, f, ensure_ascii=False)
self.label_update.setText(
_translate(
"MainWindow",
"Last Update: "
+ self.modification_date(log_file).strftime("%d.%m.%y - %H:%M"),
None,
)
)
QtWidgets.QApplication.restoreOverrideCursor()
msg.close()
############################################################################
############################################################################
########################### CREATE PDF ####################################
############################################################################
def PrepareTeXforPDF(self):
chosen_aufgabenformat = "Typ%sAufgaben" % self.label_aufgabentyp.text()[-1]
QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
if not os.path.isfile(
os.path.join(
path_programm,
"Teildokument",
"log_file_%s" % self.label_aufgabentyp.text()[-1],
)
):
self.refresh_ddb() # self.label_aufgabentyp.text()[-1]
else: ## Automatic update once per month
log_file = os.path.join(
path_programm,
"Teildokument",
"log_file_%s" % self.label_aufgabentyp.text()[-1],
)
month_update_log_file = self.modification_date(log_file).strftime("%m")
month_today = datetime.date.today().strftime("%m")
if month_today != month_update_log_file:
self.refresh_ddb() # self.label_aufgabentyp.text()[-1]
suchbegriffe = []
        #### ALGEBRA AND GEOMETRY
for all in ag_beschreibung:
x = eval("self.cb_" + all)
if x.isChecked() == True:
suchbegriffe.append(all)
#### ANALYSIS
for all in an_beschreibung:
x = eval("self.cb_" + all)
if x.isChecked() == True:
suchbegriffe.append(all)
        #### FUNCTIONAL DEPENDENCIES
for all in fa_beschreibung:
x = eval("self.cb_" + all)
if x.isChecked() == True:
suchbegriffe.append(all)
        #### PROBABILITY AND STATISTICS
for all in ws_beschreibung:
x = eval("self.cb_" + all)
if x.isChecked() == True:
suchbegriffe.append(all)
temp_suchbegriffe = []
for all in suchbegriffe:
temp_suchbegriffe.append(dict_gk[all])
suchbegriffe = temp_suchbegriffe
        #### Search by grade level
for y in range(5, 9):
themen_klasse = eval("k%s_beschreibung" % y)
for all in themen_klasse:
x = eval("self.cb_k%s_" % y + all)
grade = "K" + str(y)
if x.isChecked() == True:
# if grade not in suchbegriffe:
# suchbegriffe.append('K'+str(y))
suchbegriffe.append(all.upper())
#### typ1 ###
# log_file=os.path.join(path_programm,'Typ 2 Aufgaben','Teildokument','log_file')
######
log_file = os.path.join(
path_programm,
"Teildokument",
"log_file_%s" % self.label_aufgabentyp.text()[-1],
)
with open(log_file, encoding="utf8") as f:
beispieldaten_dateipfad = json.load(f)
# beispieldaten_dateipfad=eval(beispieldaten_dateipfad)
beispieldaten = list(beispieldaten_dateipfad.keys())
######### new tabu.sty not working ###
######################################################
########### work around ####################
#########################################
path_tabu_pkg = os.path.join(path_programm, "_database", "_config", "tabu.sty")
copy_path_tabu_pkg = os.path.join(path_programm, "Teildokument", "tabu.sty")
if os.path.isfile(copy_path_tabu_pkg):
pass
else:
shutil.copy(path_tabu_pkg, copy_path_tabu_pkg)
########################################################
filename_teildokument = os.path.join(
path_programm,
"Teildokument",
"Teildokument_%s.tex" % self.label_aufgabentyp.text()[-1],
)
        try:
            file = open(filename_teildokument, "w", encoding="utf8")
        except FileNotFoundError:
            # If the directory is not found, create it recursively and retry
            os.makedirs(os.path.dirname(filename_teildokument))
            file = open(filename_teildokument, "w", encoding="utf8")
file.write(
"\documentclass[a4paper,12pt]{report}\n\n"
"\\usepackage{geometry}\n"
"\geometry{a4paper,left=18mm,right=18mm, top=2cm, bottom=2cm}\n\n"
"\\usepackage{lmodern}\n"
"\\usepackage[T1]{fontenc}\n"
"\\usepackage{eurosym}\n"
"\\usepackage[utf8]{inputenc}\n"
"\\usepackage[ngerman]{babel}\n"
)
if self.cb_solution.isChecked() == True:
file.write("\\usepackage[solution_on]{srdp-mathematik} % solution_on/off\n")
else:
file.write(
"\\usepackage[solution_off]{srdp-mathematik} % solution_on/off\n"
)
file.write(
"\setcounter{Zufall}{0}\n\n\n"
"\pagestyle{empty} %PAGESTYLE: empty, plain, fancy\n"
"\onehalfspacing %Zeilenabstand\n"
"\setcounter{secnumdepth}{-1} % keine Nummerierung der Ueberschriften\n\n\n\n"
"%\n"
"%\n"
"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% DOKUMENT - ANFANG %%%%%%%%%%%%%%%%%%%"
"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"
"%\n"
"%\n"
"\\begin{document}\n"
'\shorthandoff{"}\n'
)
file.close()
#### Typ1 ####
# if self.combobox_searchtype.currentText()=='Alle Dateien ausgeben, die alle Suchkriterien enthalten':
#######
if (
self.combobox_searchtype.currentText()
== "Alle Dateien ausgeben, die ausschließlich diese Suchkriterien enthalten"
and chosen_aufgabenformat == "Typ2Aufgaben"
):
liste_kompetenzbereiche = {}
gkliste = []
r = 1
for all in list(beispieldaten_dateipfad.keys()):
gkliste = []
for gkbereich in dict_gk:
if dict_gk[gkbereich] in all:
gkliste.append(dict_gk[gkbereich])
liste_kompetenzbereiche.update({r: gkliste})
r += 1
for r in range(1, len(liste_kompetenzbereiche) + 1):
if liste_kompetenzbereiche[r] == []:
liste_kompetenzbereiche[r].append("-")
for all in suchbegriffe:
if all in liste_kompetenzbereiche[r]:
liste_kompetenzbereiche[r].remove(all)
gesammeltedateien = []
gesammeltedateien_temporary = []
for r in range(1, len(liste_kompetenzbereiche) + 1):
if liste_kompetenzbereiche[r] == []:
gesammeltedateien.append(
list(beispieldaten_dateipfad.keys())[r - 1]
)
# return
# for all in gesammeltedateien:
# if entry_suchbegriffe.get().lower() in all.lower():
# gesammeltedateien_temporary.append(all)
gesammeltedateien = sorted(gesammeltedateien)
# print(liste_kompetenzbereiche)
# print(gesammeltedateien)
# return
# gesammeltedateien=list(beispieldaten_dateipfad.keys())
# for item in suchbegriffe:
# for all in gesammeltedateien[:]:
# if item not in all:
# gesammeltedateien.remove(all)
# dict_gesammeltedateien={}
# for all in gesammeltedateien:
# dict_gesammeltedateien[all]=beispieldaten_dateipfdad[all]
if (
self.combobox_searchtype.currentText()
== "Alle Dateien ausgeben, die zumindest ein Suchkriterium enthalten"
or chosen_aufgabenformat == "Typ1Aufgaben"
):
gesammeltedateien = []
for all in suchbegriffe:
for element in list(beispieldaten_dateipfad.keys())[:]:
if all in element:
gesammeltedateien.append(element)
if not len(self.entry_suchbegriffe.text()) == 0:
suchbegriffe.append(self.entry_suchbegriffe.text())
if (
self.combobox_searchtype.currentText()
== "Alle Dateien ausgeben, die zumindest ein Suchkriterium enthalten"
or chosen_aufgabenformat == "Typ1Aufgaben"
):
if len(gesammeltedateien) == 0 and len(suchbegriffe) != 0:
gesammeltedateien = list(beispieldaten_dateipfad.keys())
for all in gesammeltedateien[:]:
if self.entry_suchbegriffe.text().lower() not in all.lower():
gesammeltedateien.remove(all)
# if not len(self.entry_suchbegriffe.text())==0:
# suchbegriffe.append(self.entry_suchbegriffe.text())
gesammeltedateien.sort(key=natural_keys)
dict_gesammeltedateien = {}
for all in gesammeltedateien:
dict_gesammeltedateien[all] = beispieldaten_dateipfad[all]
# print(dict_gesammeltedateien)
# return
#### typ1 ###
# ###############################################
        # #### Selection of the requested answer formats ####
# ###############################################
if chosen_aufgabenformat == "Typ1Aufgaben":
if (
self.cb_af_mc.isChecked()
or self.cb_af_lt.isChecked()
or self.cb_af_zo.isChecked()
or self.cb_af_oa.isChecked() == True
):
if suchbegriffe == []:
dict_gesammeltedateien = beispieldaten_dateipfad
for all_formats in list(dict_aufgabenformate.keys()):
x = eval("self.cb_af_" + all_formats)
if x.isChecked() == False:
for all in list(dict_gesammeltedateien):
if all_formats.upper() in all:
del dict_gesammeltedateien[all]
# if all_formats in all:
# del dict_gesammeltedateien[all]
if x.isChecked() == True:
suchbegriffe.append(all_formats)
########################################################
###############################################
        #### Selection of the requested grades #########
###############################################
selected_klassen = []
if (
self.cb_k5.isChecked()
or self.cb_k6.isChecked()
or self.cb_k7.isChecked()
or self.cb_k8.isChecked() == True
or self.cb_mat.isChecked() == True
):
| |
"""
Module containing functions and classes related to Spectra calculation and
manipulation
Spectra are calculated from the windowed, decimated time data. The inbuilt
Fourier transform implementation is inspired by the implementation of the
scipy stft function.
"""
from loguru import logger
from pathlib import Path
from typing import Union, Tuple, Dict, List, Any, Optional
from pydantic import PositiveInt
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
from resistics.common import ResisticsData, ResisticsProcess, History
from resistics.common import ResisticsWriter, Metadata, WriteableMetadata
from resistics.sampling import HighResDateTime
from resistics.time import ChanMetadata
from resistics.decimate import DecimationParameters
from resistics.window import WindowedData, WindowedLevelMetadata
class SpectraLevelMetadata(Metadata):
"""Metadata for spectra of a windowed decimation level"""
fs: float
"""The sampling frequency of the decimation level"""
n_wins: int
"""The number of windows"""
win_size: PositiveInt
"""The window size in samples"""
olap_size: PositiveInt
"""The overlap size in samples"""
index_offset: int
"""The global window offset for local window 0"""
n_freqs: int
"""The number of frequencies in the frequency data"""
freqs: List[float]
"""List of frequencies"""
@property
def nyquist(self) -> float:
"""Get the nyquist frequency"""
return self.fs / 2
class SpectraMetadata(WriteableMetadata):
"""Metadata for spectra data"""
fs: List[float]
chans: List[str]
n_chans: Optional[int] = None
n_levels: int
first_time: HighResDateTime
last_time: HighResDateTime
system: str = ""
serial: str = ""
wgs84_latitude: float = -999.0
wgs84_longitude: float = -999.0
easting: float = -999.0
northing: float = -999.0
elevation: float = -999.0
chans_metadata: Dict[str, ChanMetadata]
levels_metadata: List[SpectraLevelMetadata]
ref_time: HighResDateTime
history: History = History()
class Config:
extra = "ignore"
class SpectraData(ResisticsData):
"""
Class for holding spectra data
The spectra data is stored in the class as a dictionary mapping decimation
level to numpy array. The shape of the array for each decimation level is:
n_wins x n_chans x n_freqs
"""
def __init__(self, metadata: SpectraMetadata, data: Dict[int, np.ndarray]):
"""
Initialise spectra data
Parameters
----------
metadata : SpectraMetadata
Metadata for the spectra data
data : Dict[int, np.ndarray]
Dictionary of data, one entry for each evaluation level
"""
logger.debug(f"Creating SpectraData with data type {data[0].dtype}")
self.metadata = metadata
self.data = data
def get_level(self, level: int) -> np.ndarray:
"""Get the spectra data for a decimation level"""
if level >= self.metadata.n_levels:
raise ValueError(f"Level {level} not <= max {self.metadata.n_levels - 1}")
return self.data[level]
def get_chan(self, level: int, chan: str) -> np.ndarray:
"""Get the channel spectra data for a decimation level"""
from resistics.errors import ChannelNotFoundError
if chan not in self.metadata.chans:
raise ChannelNotFoundError(chan, self.metadata.chans)
idx = self.metadata.chans.index(chan)
return self.data[level][..., idx, :]
def get_chans(self, level: int, chans: List[str]) -> np.ndarray:
"""Get the channels spectra data for a decimation level"""
from resistics.errors import ChannelNotFoundError
for chan in chans:
if chan not in self.metadata.chans:
raise ChannelNotFoundError(chan, self.metadata.chans)
indices = [self.metadata.chans.index(chan) for chan in chans]
return self.data[level][..., indices, :]
def get_freq(self, level: int, idx: int) -> np.ndarray:
"""Get the spectra data at a frequency index for a decimation level"""
n_freqs = self.metadata.levels_metadata[level].n_freqs
if idx < 0 or idx >= n_freqs:
raise ValueError(f"Freq. index {idx} not 0 <= idx < {n_freqs}")
return np.squeeze(self.data[level][..., idx])
def get_mag_phs(
self, level: int, unwrap: bool = False
) -> Tuple[np.ndarray, np.ndarray]:
"""Get magnitude and phase for a decimation level"""
spec = self.data[level]
if unwrap:
return np.absolute(spec), np.unwrap(np.angle(spec))
return np.absolute(spec), np.angle(spec)
def get_timestamps(self, level: int) -> pd.DatetimeIndex:
"""
Get the start time of each window
Note that this does not use high resolution timestamps
Parameters
----------
level : int
The decimation level
Returns
-------
pd.DatetimeIndex
The starts of each window
Raises
------
ValueError
If the level is out of range
"""
from resistics.window import get_win_starts
if level >= self.metadata.n_levels:
raise ValueError(f"Level {level} not <= max {self.metadata.n_levels - 1}")
level_metadata = self.metadata.levels_metadata[level]
return get_win_starts(
self.metadata.ref_time,
level_metadata.win_size,
level_metadata.olap_size,
level_metadata.fs,
level_metadata.n_wins,
level_metadata.index_offset,
)
def plot(self, max_pts: Optional[int] = 10_000) -> go.Figure:
"""
Stack spectra data for all decimation levels
Parameters
----------
max_pts : Optional[int], optional
The maximum number of points in any individual plot before applying
lttbc downsampling, by default 10_000. If set to None, no
downsampling will be applied.
Returns
-------
go.Figure
The plotly figure
"""
from resistics.plot import get_spectra_stack_fig
y_labels = {x: "Magnitude" for x in self.metadata.chans}
fig = get_spectra_stack_fig(self.metadata.chans, y_labels)
colors = iter(px.colors.qualitative.Plotly)
for ilevel in range(self.metadata.n_levels):
level_metadata = self.metadata.levels_metadata[ilevel]
freqs = np.array(level_metadata.freqs)
stack = np.mean(np.absolute(self.data[ilevel]), axis=0)
legend = f"{ilevel} - {level_metadata.fs:.4f} Hz"
fig = self._add_stack_data(
fig, freqs, stack, legend, color=next(colors), max_pts=max_pts
)
return fig
def plot_level_stack(
self,
level: int,
max_pts: int = 10_000,
grouping: Optional[str] = None,
offset: str = "0h",
) -> go.Figure:
"""
Stack the spectra for a decimation level with optional time grouping
Parameters
----------
level : int
The decimation level
max_pts : int, optional
The maximum number of points in any individual plot before applying
lttbc downsampling, by default 10_000
grouping : Optional[str], optional
A grouping interval as a pandas freq string, by default None
offset : str, optional
A time offset to add to the grouping, by default "0h". For instance,
to plot night time and day time spectra, set grouping to "12h" and
offset to "6h"
Returns
-------
go.Figure
The plotly figure
"""
from resistics.plot import get_spectra_stack_fig
if grouping is None:
first_date = pd.Timestamp(self.metadata.first_time.isoformat()).floor("D")
last_date = pd.Timestamp(self.metadata.last_time.isoformat()).ceil("D")
grouping = last_date - first_date
level_metadata = self.metadata.levels_metadata[level]
df = pd.DataFrame(
data=np.arange(level_metadata.n_wins),
index=self.get_timestamps(level),
columns=["local"],
)
# group by the grouping frequency, iterate over the groups and plot
freqs = np.array(level_metadata.freqs)
y_labels = {x: "Magnitude" for x in self.metadata.chans}
fig = get_spectra_stack_fig(self.metadata.chans, y_labels)
colors = iter(px.colors.qualitative.Plotly)
for idx, group in df.groupby(pd.Grouper(freq=grouping, offset=offset)):
stack = np.mean(np.absolute(self.data[level][group["local"]]), axis=0)
fig = self._add_stack_data(
fig, freqs, stack, str(idx), color=next(colors), max_pts=max_pts
)
return fig
def _add_stack_data(
self,
fig: go.Figure,
freqs: np.ndarray,
data: np.ndarray,
legend: str,
color: str = "blue",
max_pts: Optional[int] = 10_000,
) -> go.Figure:
"""
Add stacked spectra data to a plot
Parameters
----------
fig : go.Figure
The figure to add to
freqs : np.ndarray
Frequencies
data : np.ndarray
The magnitude data
legend : str
The legend string for the data
color : str, optional
The color to plot the line, by default "blue"
max_pts : Optional[int], optional
Maximum number of points to plot, by default 10_000. If the number
of samples in the data is above this, it will be downsampled
Returns
-------
go.Figure
Plotly figure
"""
from resistics.plot import apply_lttb
n_chans = data.shape[0]
for idx in range(n_chans):
indices, chan_data = apply_lttb(data[idx, :], max_pts)
chan_freqs = freqs[indices]
scatter = go.Scattergl(
x=chan_freqs,
y=chan_data,
line=dict(color=color),
name=legend,
legendgroup=legend,
showlegend=(idx == 0),
)
fig.add_trace(scatter, row=idx + 1, col=1)
return fig
def plot_level_section(self, level: int, grouping="30T") -> go.Figure:
"""
Plot a spectra section
Parameters
----------
level : int
The decimation level to plot
grouping : str, optional
The time domain resolution, by default "30T"
Returns
-------
go.Figure
A plotly figure
"""
from resistics.plot import get_spectra_section_fig
level_metadata = self.metadata.levels_metadata[level]
df = pd.DataFrame(
data=np.arange(level_metadata.n_wins),
index=self.get_timestamps(level),
columns=["local"],
)
fig = get_spectra_section_fig(self.metadata.chans)
colorbar_len = 0.90 / self.metadata.n_chans
colorbar_inc = 0.84 / (self.metadata.n_chans - 1)
# group by the grouping frequency, iterate over the groups and plot
data = {}
for idx, group in df.groupby(pd.Grouper(freq=grouping)):
data[idx] = np.mean(np.absolute(self.data[level][group["local"]]), axis=0)
for idx, chan in enumerate(self.metadata.chans):
df_data = pd.DataFrame(
data={k: v[idx] for k, v in data.items()}, index=level_metadata.freqs
)
z = np.log10(df_data.values)
z_min = np.ceil(z.min())
z_max = np.floor(z.max())
z_range = np.arange(z_min, z_max + 1)
colorbar = dict(
tickvals=z_range,
ticktext=[f"10^{int(x)}" for x in z_range],
y=0.92 - idx * colorbar_inc,
len=colorbar_len,
)
heatmap = go.Heatmap(
z=z,
x=pd.to_datetime(df_data.columns) + pd.Timedelta(grouping) / 2,
y=df_data.index,
zmin=z_min,
zmax=z_max,
colorscale="viridis",
colorbar=colorbar,
)
fig.append_trace(heatmap, row=idx + 1, col=1)
return fig
class FourierTransform(ResisticsProcess):
"""
Perform a Fourier transform of the windowed data
The processor is inspired by the scipy.signal.stft function which performs
a similar process and involves a Fourier transform along the last axis of
the windowed data.
Parameters
----------
win_fnc : Union[str, Tuple[str, float]]
The window to use before performing the FFT, by default ("kaiser", 14)
detrend : Union[str, None]
Type of detrending to apply before performing FFT, by default linear
detrend. Setting to None will not apply any detrending to the data prior
to the FFT
workers : int
        The number of CPUs to use, by default max
import argparse
import os
import pickle
import sys
from pathlib import Path
from collections import OrderedDict
from typing import Union
import numpy as np
import scipy.signal
import scipy.stats
from matplotlib import collections as collections
from matplotlib import pyplot as mpl
from matplotlib import rc
from matplotlib.backends.backend_pdf import PdfPages
from pylibrary.plotting import plothelpers as PH
from pylibrary.tools import cprint
CP = cprint.cprint
from .. import ephysanalysis as EP
from . import digital_filters as DF
from . import minis_methods as minis
"""
Analysis of miniature synaptic potentials
Provides measure of event amplitude distribution and event frequency distribution
The analysis is driven by an imported dictionary.
Example for the data table:
self.basedatadir = '/Volumes/Pegasus/ManisLab_Data3/Sullivan_Chelsea/miniIPSCs'
dataplan_params = {'m1a': {'dir': '2017.04.25_000/slice_000/cell_001', 'prots': [0,1,3],
'thr': 1.75, 'rt': 0.35, 'decay': 6., 'G': 'F/+', 'exclist': []},
'm1b': {'dir': '2017.04.25_000/slice_000/cell_002', 'prots': [7],
'thr': 1.75, 'rt': 0.35, 'decay': 6., 'G': 'F/+', 'exclist': []},
'm2a': {'dir': '2017.05.02_000/slice_000/cell_000/', 'prots': [0,1,2],
'thr': 1.75, 'rt': 0.32, 'decay': 5., 'G': 'F/+', 'exclist': []},
'm2b': {'dir': '2017.05.02_000/slice_000/cell_001', 'prots': [0,1,2],
'thr': 1.75, 'rt': 0.35, 'decay': 4., 'G': 'F/+', 'exclist': {1: [4, 5, 6], 2: [8]}},
'm2c': {'dir': '2017.05.02_000/slice_000/cell_002', 'prots': [0,1,2],
}
Where:
each dict key indicates a cell from a mouse (mice are numbered, cells are lettered)
'dir': The main cell directory, relative to the base directory,
'prots': a list of the protocols to be analyzed,
'exclist': a dict of the protocols that have traces to be excluded
The excluded traces are in a tuple or list for each protocol.
For example, exclist: {0 : [1,2], 1: [3,4,5]} results in the exclusion of
traces 1 and 2 from protocol run 0, and traces 3, 4 and 5 from protocol run 1
'thr' : SD threshold for event detection (algorithm dependent)
'rt' : rise time for template (in msec)
'decay': decay time constant for the template (in msec)
'G' : group identifier (e.g, genotype, treatment, etc.)
The data table can be generated on the fly from another table structure, such as an excel
sheet read by pandas, etc.
Requires some of Manis' support libraries/modules, including:
ephysanalysis module (git clone https://github.com/pbmanis/ephysanalysis)
pylibrary utilities, (git clone https://github.com/pbmanis/ephysanalysis)
Output summary is a Python pickle file (.p extension) that is read by mini_summary_plots.py
<NAME>, Ph.D. Jan-March 2018.
Revised 2/2021 to work with new mini_methods output formats.
"""
rc("text", usetex=False)
# rc('font',**{'family':'sans-serif','sans-serif':['Verdana']})
class MiniAnalysis:
def __init__(self, dataplan):
"""
Perform detection of miniature synaptic events, and some analysis
Parameters
----------
dataplan : object
a dataplan object, with
datasource: the name of the file holding the dict
datadir : the path to the data itself
dataplan_params : the dict with information driving the analysis
"""
self.datasource = dataplan.datasource
self.basedatadir = dataplan.datadir
self.shortdir = dataplan.shortdir
self.dataset = dataplan.dataset
self.outputpath = dataplan.outputpath
self.dataplan_params = dataplan.dataplan_params
self.dataplan_data = dataplan.data
self.min_time = dataplan.min_time
self.max_time = dataplan.max_time
self.clamp_name = "Clamp1.ma"
self.protocol_name = "minis"
print('mini_analysis dataplan: ', self.dataplan_params)
try:
self.global_threshold = dataplan.data["global_threshold"]
self.override_threshold = True
except KeyError:
self.override_threshold = False
self.global_decay = None
if "global_decay" in dataplan.data.keys():
self.global_decay = dataplan.data["global_decay"]
self.override_decay = True
else:
self.override_decay = False
print('override decay: ', self.override_decay, self.override_threshold)
self.filter = False
self.filterstring = "no_notch_filter"
try:
self.filter = dataplan.data["notch_filter"]
if self.filter:
self.filterstring = "notch_filtered"
except KeyError:
self.filter = False
self.filterstring = "no_notch_filter"
try:
self.min_event_amplitude = dataplan.data["min_event_amplitude"]
except KeyError:
self.min_event_amplitude = 2.0e-12
def set_clamp_name(self, name):
self.clamp_name = name
def set_protocol_name(self, name):
self.protocol_name = name
# from acq4 functions:
def measure_baseline(self, data, threshold=2.0, iterations=2):
"""Find the baseline value of a signal by iteratively measuring the median value, then excluding outliers."""
data = data.view(np.ndarray)
med = np.median(data)
if iterations > 1:
std = data.std()
thresh = std * threshold
arr = np.ma.masked_outside(data, med - thresh, med + thresh)
try:
len(arr.mask) # could be an array or just a boolean
except TypeError:
if arr.mask is False: # nothing to mask...
return med
if len(arr) == 0:
raise Exception(
"Masked out all data. min: %f, max: %f, std: %f"
% (med - thresh, med + thresh, std)
)
return self.measure_baseline(arr[~arr.mask], threshold, iterations - 1)
else:
return med
def analyze_all(self, fofilename, check=False, mode="aj", engine="cython"):
"""
Wraps analysis of individual data sets, writes plots to
a file named "summarydata%s.p % self.datasource" in pickled format.
Parameters
----------
fofilename : str (no default)
name of the PDF plot output file
check : bool (default: False)
If true, run just checks for the existence of the data files,
but does no analysis.
Returns
-------
Nothing
"""
print('analyze_all')
acqr = EP.acq4read.Acq4Read(
dataname=self.clamp_name
) # creates a new instance every time - probably should just create one.
summarydata = {}
with PdfPages(fofilename) as pdf:
for index, mouse in enumerate(sorted(self.dataplan_params.keys())):
self.analyze_one_cell(
mouse,
pdf,
maxprot=10,
arreader=acqr,
check=check,
mode=mode,
engine=engine,
)
summarydata[mouse] = self.cell_summary
# if index >= 0:
# break
if not check:
print("output file: ", self.shortdir, self.dataset, self.filterstring, mode)
ofile = Path(self.outputpath, f"{self.dataset:s}_{str(self.filterstring):s}_{mode:s}.p")
fout = str(ofile)
print("outfile: ", ofile)
fh = open(fout, "wb")
pickle.dump(summarydata, fh)
fh.close()
else:
print("All files found (an exception would be raised if one was not found)")
def build_summary_dict(self, genotype:Union[str, None] = None, eyfp:str = "ND", mouse:Union[str, int, None]=None):
self.cell_summary = {
"intervals": [],
"amplitudes": [],
"protocols": [],
"eventcounts": [],
"genotype": genotype,
"EYFP": eyfp,
"mouse": mouse,
"amplitude_midpoint": 0.0,
"holding": [],
"averaged": [],
"sign": [],
"threshold": [],
"indiv_evok": [],
"indiv_notok": [],
"indiv_amp": [],
"indiv_fitamp": [],
"indiv_tau1": [],
"indiv_tau2": [],
"indiv_fiterr": [],
"indiv_Qtotal": [],
"indiv_tb": [],
"allevents": [],
"fitted_events": [],
"best_fit": [],
"best_decay_fit": [],
}
def analyze_one_cell(
self,
mouse,
pdf,
maxprot=10,
arreader=None,
check=False,
mode="aj",
engine="cython",
):
"""
Provide analysis of one entry in the data table using the Andrade_Jonas algorithm
and fitting distributions.
Generates a page with plots for all protocols and traces stacked, and
a second page of histograms with
fits for the interval and amplitude distributions
Parameters
----------
mouse : str (no default)
key into the dictionary for the data to be analyzed
pdf : pdf file object (no default):
the pdf file object to write the plots to.
maxprot : int (default: 10)
            Maximum number of protocols to do in the analysis
check : bool (default: False)
If true, run just checks for the existence of the data files,
but does no analysis.
Returns
-------
cell summary dictionary for the 'mouse' entry.
"""
print('analyze one cell')
if arreader is None:
acqr = EP.acq4read.Acq4Read(
dataname=self.clamp_name
) # only if we don't already have one
else:
acqr = arreader
self.rasterize = (
True # set True to rasterize traces to reduce size of final document
) # set false for pub-quality output (but large size)
self.acqr = acqr
dt = 0.1
mousedata = self.dataplan_params[mouse]
if self.override_threshold:
mousedata["thr"] = self.global_threshold # override the threshold setting
if self.override_decay:
mousedata["decay"] = self.global_decay # override decay settings
self.sign = 1
if "sign" in self.dataplan_data:
self.sign = int(self.dataplan_data["sign"])
print("\nMouse: ", mouse)
self.build_summary_dict(genotype=mousedata["G"], eyfp=mousedata['EYFP'], mouse=mouse)
if not check:
self.plot_setup()
datanameposted = False
self.yspan = 40.0
self.ypqspan = 2000.0
ntr = 0
# for all of the protocols that are included for this cell (identified by number and maybe letters)
CP("g", f" mousedata: {str(mousedata):s}")
print(" mousedata prots: ", mousedata["prots"])
if len(mousedata["prots"]) == 0:
print(" No protocols, moving on")
return None
for nprot, dprot in enumerate(mousedata["prots"]):
if nprot > maxprot:
return
self.nprot = nprot
self.dprot = dprot
exclude_traces = []
print(' exclusion list: ', mousedata["exclist"])
if dprot in mousedata["exclist"].keys():
# print (mousedata['exclist'], dprot, nprot)
exclude_traces = mousedata["exclist"][dprot]
# print('dprot: ', dprot)
# print('exc: ', exclude_traces)
sign = self.dataplan_data["sign"]
fn = Path(self.basedatadir, mousedata["dir"])
fx = fn.name
ext = fn.suffix
print(" sign: ", sign)
fn = Path(fn, f"{mousedata['protocol_name']:s}_{dprot:03d}")
print(" Protocol file: ", fn)
split = fn.parts[-4:-1]
# dataname = ""
# for i in range(len(split)):
# dataname = Path(dataname, split[i])
# print('dataname: ', fn)
acqr.setProtocol(fn)
# print(check)
if not check:
print(" Protocol dataname: ", fn)
if len(exclude_traces) > 0:
CP("c", f" Excluding traces: {str(exclude_traces):s}")
else:
print(" No traces excluded")
else:
result = acqr.getData(check=True)
if result is False:
CP('r', f"******* Get data failed to find a file : {str(fn):s}")
CP('r', f" dataname: {fn:s}")
continue
acqr.getData()
# print(len(acqr.data_array))
# if isinstance(acqr.data_array, list):
# acqr.data_array = np.ndarray(acqr.data_array)
# print(acqr.data_array.shape)
oktraces = [x for x in range(acqr.data_array.shape[0]) if x not in exclude_traces]
data = np.array(acqr.data_array[oktraces])
| |
nday_average)
large_df = df.reindex(large_index.dayofyear)
smoothed_df = large_df.rolling(window=nday_average, min_periods=0).mean()
return smoothed_df / divisor
def _line_name(date_index):
start = date_index[0].year
end = date_index[-1].year
if start == end:
return str(start)
else:
return '{start}-{end}'.format(start=start, end=end)
def _scatter_plot_year(series, line_style={}):
line = {'width': 3}
line.update(line_style)
return go.Scatter(
name=series.name,
x=series.index,
y=series.values,
hoverinfo='y',
line=line
)
def _scatter_plot_average(series, name):
return go.Scatter(
name=name,
x=series.index,
y=list(series),
hoverinfo='y',
line={
'color': 'rgba(150, 150, 150, 1)',
'width': 3
}
)
def _scatter_plots_envelope(series_lower, series_upper, name, fillcolor='rgba(240, 240, 240, 1)'):
kwargs = {
'name': name,
'line': {'color': 'rgba(0, 0, 0, 0)'},
'hoverinfo': 'y'
}
upper = go.Scatter(
x=series_upper.index,
y=list(series_upper),
showlegend=False,
**kwargs
)
lower = go.Scatter(
x=series_lower.index,
y=list(series_lower),
fillcolor=fillcolor,
fill='tonexty',
**kwargs
)
return [upper, lower]
def _config_from_file(configfile):
"""Return a dict containing all of the config values found in the given
configfile.
"""
conf = {}
# set from config if possible
if configfile:
with open(configfile, 'r') as fp:
config_yaml = yaml.load(fp)
conf = config_yaml
# in the config yaml, 'years' is a map of years to styles; in the config
# dict used in this module, 'year_styles' is that map and 'years' is
# simply a list of the years to graph
conf['year_styles'] = conf.pop('years', {})
conf['years'] = list(conf['year_styles'].keys())
return conf
def _config_from_cli(cli_args):
cli_config = dict((k, v) for k, v in cli_args.items() if v is not None)
parse = {
'date': lambda x: dt.datetime.strptime(x, '%Y-%m-%d').date(),
'month_bounds': lambda x: [int(month) for month in x.split(',')][0:2],
'percentiles': lambda x: [int(percentile) for percentile in x.split(',')][0:2],
'years': lambda x: [int(year) for year in x.split(',')]
}
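    # For example (hypothetical CLI values), {'years': '2015,2016',
    # 'percentiles': '25,75', 'date': '2017-09-01'} is parsed into
    # {'years': [2015, 2016], 'percentiles': [25, 75],
    #  'date': datetime.date(2017, 9, 1)}.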
for key, parser in parse.items():
if key in cli_config:
cli_config[key] = parser(cli_config[key])
return cli_config
def _validate_config(config):
errors = []
def error(msg):
errors.append('Invalid configuration: {}.'.format(msg))
if config['plot_mean'] and config['plot_median']:
error('can only plot one of mean and median')
# if config['plot_mean'] and config['plot_percentiles']:
# error('cannot plot percentiles with mean')
if config['plot_median'] and config['plot_stdev']:
error('cannot plot stdev with median')
if config['plot_stdev'] and (config['plot_idr'] or config['plot_iqr']):
error('can only plot one of stdev and percentiles')
if config['hemi'] not in ['N', 'S']:
error('hemi must be \'N\' or \'S\'')
if config['nstdevs'] < 0:
error('nstdevs cannot be negative')
if config['nday_average'] < 0:
error('nday_average cannot be negative')
if not os.path.isfile(config['data_store']):
error('data_store must exist')
if config['min_valid'] > config['nday_average']:
error('min_valid must be less than or equal to nday_average')
if config['divisor'] <= 0:
error('divisor must be positive')
if errors:
raise SeaIceToolsError('\n'.join(errors))
def _year_with_most_months_in_index(date_index):
"""
Arguments
---------
date_index: pandas DatetimeIndex. It is expected that every month contained
in the index will have its first day in the index.
"""
count = Counter(date_index[date_index.day == 1].year)
years = [val for val, c in count.items() if c == max(count.values())]
return min(years)
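# Illustrative sketch (not part of the original module; the dates are made up): for a
# November-March window the index spans two calendar years, and the helper returns the
# year contributing the most month-starts (2016 has Jan/Feb/Mar, 2015 only Nov/Dec).
def _example_year_with_most_months():
    idx = pd.date_range('2015-11-01', '2016-03-31', freq='D')
    return _year_with_most_months_in_index(idx)  # -> 2016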
def _get_record_year(series, date, month_bounds, kind):
"""Get the year that should be plotted to represent the record minimum or
maximum for the given series of data.
The returned year may not be the year containing the actual record data
point; graphs can be created with a year changeover, in which case a line
represents multiple years; a single year is still used to generate that
line.
See the Click documentation on main() for information on the arguments not
listed below.
Arguments
---------
series: pandas Series of the data to plot. Its index must be convertible to
a DatetimeIndex (i.e., with pd.to_datetime()).
kind: a string describing what kind of record to get, and can be one of
these values:
* 'max': the year containing the date with the highest overall value in
the series
* 'min': the year containing the date with the lowest overall value in
the series
"""
series = series.copy()
series.index = pd.to_datetime(series.index)
start_date, end_date = _bounding_date_range(date, *month_bounds)
date_index = pd.date_range(start_date, end_date, freq='D')
extrema = {
'max': 'idxmax',
'min': 'idxmin',
}[kind]
if end_date.year - start_date.year > 1:
raise SeaIceToolsValueError('Can\'t compute record year for more than 2 years.')
single_year = start_date.year == end_date.year
if single_year:
cutoff_date = pd.to_datetime(dt.date(start_date.year, 1, 1))
else:
cutoff_date = pd.to_datetime(start_date)
series = series[series.index < cutoff_date]
record_date = getattr(series, extrema)()
# easy case; no year change in plot, return the record year.
if single_year:
return record_date.year
focus_before_year_switch = date.year == date_index[0].year
focus_after_year_switch = date.year == date_index[-1].year
record_after_year_switch = (record_date.month
in date_index[date_index.year == date_index[0].year].month)
record_before_year_switch = (record_date.month
in date_index[date_index.year == date_index[-1].year].month)
both_after = focus_after_year_switch & record_before_year_switch
both_before = focus_before_year_switch & record_after_year_switch
if both_before or both_after:
return record_date.year
if focus_after_year_switch & record_after_year_switch:
return record_date.year + 1
if focus_before_year_switch & record_before_year_switch:
return record_date.year - 1
# If we get this far, the record's month is not shown directly on the plot.
# Handle special case where record_date could have just happened in the previous year.
if record_date.year == date_index[0].year:
if focus_after_year_switch:
return record_date.year
if focus_before_year_switch:
return record_date.year - 1
most_plotted_year = _year_with_most_months_in_index(date_index)
year = record_date.year + (date.year - most_plotted_year)
return year
def _get_record_low_year(series, date, month_bounds):
"""Return the year containing the lowest single value in series, excluding the
year of the focal date.
See the Click documentation on main() for information on the arguments not
listed below.
Arguments
---------
series: a pandas Series with an index that can be compared with a
datetime.date, e.g., a PeriodIndex or DatetimeIndex.
"""
return _get_record_year(series, date, month_bounds, 'min')
def _get_record_high_year(series, date, month_bounds):
"""Return the year containing the highest single value in series, excluding the
year of the focal date.
See the Click documentation on main() for information on the arguments not
listed below.
Arguments
---------
series: a pandas Series with an index that can be compared with a
datetime.date, e.g., a PeriodIndex or DatetimeIndex.
"""
return _get_record_year(series, date, month_bounds, 'max')
def _handle_special_year_values(config_in):
config = copy.deepcopy(config_in)
df = sit.daily(hemisphere=config['hemi'],
data_store=config['data_store'],
interpolate=1,
nday_average=config['nday_average'],
min_valid=config['min_valid'])
series = df.total_extent_km2
date = config['date']
month_bounds = config['month_bounds']
special_vals = {
'current': date.year,
'record_low_year': _get_record_low_year(series, date, month_bounds),
'record_high_year': _get_record_high_year(series, date, month_bounds)
}
max_years_before = date.year - nt.BEGINNING_OF_SATELLITE_ERA.year
for delta in range(1, max_years_before + 1):
special_vals['years_before_' + str(delta)] = date.year - delta
# "rename" keys in year_styles dict
for old, new in special_vals.items():
if old in config['year_styles']:
style_settings = config['year_styles'].pop(old)
if new in config['year_styles']:
config['year_styles'][new].update(style_settings)
else:
config['year_styles'][new] = style_settings
# replace values in years list
for index, year in enumerate(config['years']):
if year in special_vals:
config['years'][index] = special_vals[year]
config['years'] = [int(year) for year in config['years']]
return config
def _configs_dir():
this_dir = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(this_dir, os.pardir, 'configs')
return os.path.realpath(path)
def _handle_special_legend_settings(config_in):
config = copy.deepcopy(config_in)
if config['legend_side']:
return config
if 'legend' in config:
month = config['date'].month
if month in config['legend']:
config['legend_side'] = config['legend'][month]['legend_side']
return config
def _process_config(kwargs):
# if using a standard_plot, treat it as a special case of using --configfile
standard_plot = kwargs.pop('standard_plot', None)
if standard_plot:
configfile = os.path.join(_configs_dir(), standard_plot + '.yaml')
else:
configfile = kwargs.pop('configfile', None)
cli_config = kwargs
# base config
config = copy.deepcopy(DEFAULTS)
# add settings from yaml config file
config.update(_config_from_file(configfile))
# add settings from CLI options
config.update(_config_from_cli(cli_config))
config = _handle_special_legend_settings(config)
# update year keys like "years_before_1" and "record_low_year"
config = _handle_special_year_values(config)
config['years'] = sorted(set(config['years']))
# match years to draw with respective styling configuration in the yaml
config['styles'] = [config['year_styles'].get(year, {}) for year in config['years']]
if config['output_file'] == DEFAULTS['output_file']:
config['output_file'] = DEFAULTS['output_file'].format(hemi=config['hemi'])
if config['output_dir']:
config['output_file'] = os.path.join(config['output_dir'], config['output_file'])
config['percentiles'] = []
if config['plot_median']:
config['percentiles'].extend([50])
if config['plot_iqr']:
config['percentiles'].extend([25, 75])
if config['plot_idr']:
config['percentiles'].extend([10, 90])
_validate_config(config)
return config
@click.command()
@click.option('--standard_plot', type=click.Choice(['north', 'south',
'asina_north', 'asina_south',
'north_iqr', 'south_iqr',
'asina_north_iqr', 'asina_south_iqr']),
help=('Use predefined settings to generate a standard timeseries image. Other '
'options will override the values set in the standard plot config. This option '
'overrides --configfile.'))
@click.option('-c', '--configfile', type=click.Path(exists=True, dir_okay=False),
help=('YAML file containing settings for the graph. All of the flags described below '
'can be set in this YAML file. Values set with the CLI flags will override '
'values found in the configuration file. One important exception is that '
'--years are handled a bit differently; custom styling for each year can be '
'defined, and can only be defined in the yaml configuration. The setting for '
'--legend_side can also be customized by month in the yaml file. For example '
'configurations, the files used by --standard_plot can be found in {}. This '
'option is ignored if --standard_plot is used.'.format(_configs_dir())))
@click.option('--data_store', type=click.Path(exists=True, dir_okay=False),
patient """
# Temperature
aggregated_df['temp_SIRS'] = 0
aggregated_df.loc[aggregated_df['223762'] < 10, '223762'] = float("NaN")
aggregated_df.loc[aggregated_df['223762'] > 50, '223762'] = float("NaN")
aggregated_df.loc[aggregated_df['223761'] < 70, '223761'] = float("NaN")
aggregated_df.loc[aggregated_df['223761'] > 120, '223761'] = float("NaN")
aggregated_df.loc[aggregated_df['223762'] > 38, 'temp_SIRS'] = 1
aggregated_df.loc[aggregated_df['223762'] < 36, 'temp_SIRS'] = 1
aggregated_df.loc[aggregated_df['223761'] > 100.4, 'temp_SIRS'] = 1
aggregated_df.loc[aggregated_df['223761'] < 96.8, 'temp_SIRS'] = 1
# Heart rate
aggregated_df['hr_SIRS'] = 0
aggregated_df.loc[aggregated_df['220045'] > 300, '220045'] = float("NaN")
aggregated_df.loc[aggregated_df['220045'] < 0, '220045'] = float("NaN")
aggregated_df.loc[aggregated_df['220045'] > 90, 'hr_SIRS'] = 1
# Respiratory rate
aggregated_df['resp_SIRS'] = 0
aggregated_df.loc[aggregated_df['220210'] > 70, '220210'] = float("NaN")
aggregated_df.loc[aggregated_df['220210'] < 0, '220210'] = float("NaN")
aggregated_df.loc[aggregated_df['224690'] > 70, '224690'] = float("NaN")
aggregated_df.loc[aggregated_df['224690'] < 0, '224690'] = float("NaN")
aggregated_df.loc[aggregated_df['220210'] > 20, 'resp_SIRS'] = 1
aggregated_df.loc[aggregated_df['224690'] > 20, 'resp_SIRS'] = 1
# WBC
aggregated_df['wbc_SIRS'] = 0
aggregated_df.loc[aggregated_df['51301'] > 12, 'wbc_SIRS'] = 1
aggregated_df.loc[aggregated_df['51301'] < 4, 'wbc_SIRS'] = 1
# Aggregation
sirs_cols = ['temp_SIRS', 'hr_SIRS', 'resp_SIRS', 'wbc_SIRS']
aggregated_df[sirs_cols] = aggregated_df.groupby('hadm_id')[sirs_cols].ffill().fillna(0).astype(int)
aggregated_df['SIRS'] = aggregated_df[sirs_cols].sum(axis=1)
aggregated_df.drop(columns=sirs_cols, inplace=True)
return aggregated_df
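# Illustrative example (made-up values, not from the source): an hour with a heart rate
# of 120 bpm (itemid 220045) and a temperature of 39 C (itemid 223762) meets two of the
# four SIRS criteria above (hr_SIRS = 1, temp_SIRS = 1) while the respiratory and WBC
# criteria stay 0, so that row ends up with SIRS = 2.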
def calculate_SOFA(aggregated_df):
""" returns a dataframe with an additional column for SOFA score at every hour for the patient """
scores = [0, 1, 2, 3, 4]
reverse_scores = [4, 3, 2, 1, 0]
# Respiration
aggregated_df.loc[aggregated_df['223835'] < 1, '223835'] = aggregated_df['223835'] * 100
aggregated_df.loc[aggregated_df['223835'] < 20, '223835'] = float("NaN")
aggregated_df['pao2fio2ratio'] = aggregated_df['50821'] / aggregated_df['223835'] * 100
aggregated_df['pao2fio2ratio_novent'] = aggregated_df.loc[aggregated_df['InvasiveVent']==0]['pao2fio2ratio']
aggregated_df['pao2fio2ratio_vent'] = aggregated_df.loc[aggregated_df['InvasiveVent']==1]['pao2fio2ratio']
aggregated_df['resp_SOFA'] = 0
aggregated_df.loc[aggregated_df['pao2fio2ratio_novent'] < 400, 'resp_SOFA'] = 1
aggregated_df.loc[aggregated_df['pao2fio2ratio_novent'] < 300, 'resp_SOFA'] = 2
aggregated_df.loc[aggregated_df['pao2fio2ratio_vent'] < 200, 'resp_SOFA'] = 3
aggregated_df.loc[aggregated_df['pao2fio2ratio_vent'] < 100, 'resp_SOFA'] = 4
# Liver
bilirubin_bins = [-1, 1.2, 2, 6, 12, float("inf")]
aggregated_df['liver_SOFA'] = pd.cut(aggregated_df['50885'], bilirubin_bins, labels=scores).astype('float')
# Coagulation
coag_bins = [-1, 20, 50, 100, 150, float("inf")]
aggregated_df['coag_SOFA'] = pd.cut(aggregated_df['51265'], coag_bins, labels=reverse_scores).astype('float')
# Renal
creat_bins = [-1, 1.2, 2, 3.5, 5, float("inf")]
aggregated_df['renal_SOFA'] = pd.cut(aggregated_df['50912'], creat_bins, labels=scores).astype('float')
urine_output_cols = ['226559', '226560', '226561', '226584', '226563', '226564', '226565', '226567',
'226557', '226558', '227488', '227489']
aggregated_df.loc[aggregated_df['227488']>0, '227488'] = -aggregated_df['227488']
aggregated_df['urine_output'] = aggregated_df[urine_output_cols].sum(axis=1)
aggregated_df.loc[aggregated_df['urine_output'] < 500, 'renal_SOFA'] = 3
aggregated_df.loc[aggregated_df['urine_output'] < 200, 'renal_SOFA'] = 4
# Cardiovascular
# features = [221662, 221653, 221289, 221906] # dopamine, dobutamine, epinephrine, norepinephrine
aggregated_df.loc[(aggregated_df['221662_rate']>0) | (aggregated_df['221653_rate']>0), 'cardio_SOFA'] = 2
aggregated_df.loc[(aggregated_df['221662_rate']>5) | ((aggregated_df['221289_rate'] > 0) & (aggregated_df['221289_rate']<=0.1)) | ((aggregated_df['221906_rate'] > 0) & (aggregated_df['221906_rate']<=0.1)), 'cardio_SOFA'] = 3
aggregated_df.loc[(aggregated_df['221662_rate']>15) | (aggregated_df['221289_rate']>0.1) | (aggregated_df['221906_rate'] > 0.1), 'cardio_SOFA'] = 4
# GCS
# [220739, 223900, 223901] GCS-Eye, GCS-Verbal, GCS-Motor
aggregated_df['220739'] = aggregated_df.groupby('hadm_id')['220739'].ffill().fillna(4).astype(int)
aggregated_df['223900'] = aggregated_df.groupby('hadm_id')['223900'].ffill().fillna(5).astype(int)
aggregated_df['223901'] = aggregated_df.groupby('hadm_id')['223901'].ffill().fillna(6).astype(int)
aggregated_df['gcs'] = aggregated_df['220739'] + aggregated_df['223900'] + aggregated_df['223901']
aggregated_df.loc[aggregated_df['223900'] == 0, 'gcs'] = 15
gcs_bins = [-1, 6, 9, 12, 14, 16]
aggregated_df['gcs_SOFA'] = pd.cut(aggregated_df['gcs'], gcs_bins, labels=reverse_scores).astype('float')
# forwardfill for SOFA scores first, then replace NA's with 0.
sofa_cols = ['liver_SOFA', 'coag_SOFA', 'renal_SOFA', 'cardio_SOFA', 'resp_SOFA', 'gcs_SOFA']
aggregated_df[sofa_cols] = aggregated_df.groupby('hadm_id')[sofa_cols].ffill().fillna(0).astype(int)
aggregated_df['SOFA'] = aggregated_df[sofa_cols].sum(axis=1)
sofa_cols = sofa_cols + ['gcs', 'urine_output']
aggregated_df.drop(columns=sofa_cols, inplace=True)
return aggregated_df
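# Illustrative sketch (not part of the original pipeline; the creatinine values are made
# up): how pd.cut maps a raw lab value onto the 0-4 SOFA sub-scores used above. The bins
# correspond to the intervals (-1, 1.2], (1.2, 2], (2, 3.5], (3.5, 5], (5, inf).
def _example_sofa_binning():
    creatinine = pd.Series([0.9, 1.5, 2.4, 4.0, 7.2])
    creat_bins = [-1, 1.2, 2, 3.5, 5, float("inf")]
    return pd.cut(creatinine, creat_bins, labels=[0, 1, 2, 3, 4]).astype('float')
    # -> 0.0, 1.0, 2.0, 3.0, 4.0 respectively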
def calculate_SI(aggregated_df):
""" calculates suspicion of infection as per Sepsis-3 on aggregated hourly dataframe and saves it under the column `suspicion_of_infection`.
Note:
aggregated_df must contain `antibiotics` and `microbio-sample` columns.
"""
df = aggregated_df[['hadm_id', 'hour', 'antibiotics', 'microbio-sample']].copy() # reduce data, speeds up computation
df['antibiotics'] = df['antibiotics'].fillna(0)
def _fix_columns(antibiotics_window_df):
"""Fixes resulting columns/index from GroupBy.rolling so that there are just hadm_id, hour, and antibiotics cols"""
if 'hadm_id' in antibiotics_window_df.index.names and 'hadm_id' in df.columns:
antibiotics_window_df.drop(columns='hadm_id', inplace=True)
if 'hour' in antibiotics_window_df.index.names and 'hour' in df.columns:
antibiotics_window_df.drop(columns='hour', inplace=True)
antibiotics_window_df = antibiotics_window_df.reset_index()[['hadm_id', 'hour', 'antibiotics']]
return antibiotics_window_df
antibiotics_last_24h = df.groupby('hadm_id').rolling(on='hour', window=24, min_periods=1).antibiotics.sum()
antibiotics_last_24h = _fix_columns(antibiotics_last_24h)
antibiotics_last_24h = antibiotics_last_24h.rename(columns={'antibiotics': 'antibiotics_last_24h'})
antibiotics_next_72h = df[::-1].groupby('hadm_id').rolling(on='hour', window=72, min_periods=1).antibiotics.sum()[::-1]
antibiotics_next_72h = _fix_columns(antibiotics_next_72h)
antibiotics_next_72h = antibiotics_next_72h.rename(columns={'antibiotics': 'antibiotics_next_72h'})
df = df.merge(antibiotics_last_24h, on=['hadm_id', 'hour'])
df = df.merge(antibiotics_next_72h, on=['hadm_id', 'hour'])
microbio_sample = df['microbio-sample'] == 1
suspicion_of_infection = microbio_sample & (df['antibiotics_last_24h'] > 0)
suspicion_of_infection |= microbio_sample & (df['antibiotics_next_72h'] > 0)
aggregated_df['suspicion_of_infection'] = suspicion_of_infection
return aggregated_df
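# Illustrative sketch (not part of the original pipeline; all ids and values are made
# up): a toy admission where a microbiology sample is drawn at hour 30 and antibiotics
# are started at hour 40. The antibiotics fall inside the 72h window after the sample,
# so calculate_SI() flags hour 30 as suspicion of infection.
def _example_suspicion_of_infection():
    toy = pd.DataFrame({
        'hadm_id': [1] * 50,
        'hour': list(range(50)),
        'antibiotics': [1 if h == 40 else 0 for h in range(50)],
        'microbio-sample': [1 if h == 30 else 0 for h in range(50)],
    })
    toy = calculate_SI(toy)
    return toy.loc[toy['hour'] == 30, 'suspicion_of_infection']  # -> True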
def _sepsis_sofa_diff(df, hours_before_si=48, hours_after_si=24, metric='SOFA', sepsis_col='sepsis',
decrease_baseline=False, sofa_diff_threshold=2, ):
"""Computes sepsis indicator labels for a single patient, by comparing SOFA score at each timestep in window around SI to
baseline value from first hour of window.
Based off the following script by <NAME>:
https://github.com/BorgwardtLab/mgp-tcn/blob/master/src/query/compute_sepsis_onset_from_exported_sql_table.py
Parameters:
- df: hourly values for patient (must contain columns 'hour', 'suspicion_of_infection', and 'SOFA' or other metric)
- hours_before_si: defines size of window around SI
- hours_after_si: defines size of window around SI
- metric: column name of to check for acute increase of (SOFA or SIRS)
- sepsis_col: which column to store sepsis flag under.
- decrease_baseline: whether to decrease the baseline if a lower SOFA value occurs during window.
- sofa_diff_threshold: threshold of SOFA-increase for sepsis to occur (default: 2)
Note:
Sepsis onset time is set to be the time of SOFA-increase.
"""
df[sepsis_col] = False # initialize to all False
df_s = df.iloc[np.argsort(df.hour)] # sort by hour, increasing
si_hours_df = df_s.loc[df_s.suspicion_of_infection == 1]
si_hours = si_hours_df.hour.tolist()
for i, si_hour in enumerate(si_hours):
# for every SI occurrence, calculate window around hour of SI
si_window = df_s.loc[(si_hour-hours_before_si <= df_s.hour) & (df_s.hour <= si_hour+hours_after_si)]
si_window["SI_hour"] = si_hour
# check if there is an increase in SOFA during window
sofa = np.array(si_window[metric])
min_sofa = sofa[0]
for i in range(len(sofa)):
current_sofa = sofa[i]
if decrease_baseline and current_sofa < min_sofa:
min_sofa = current_sofa
else:
diff = current_sofa - min_sofa
if diff >= sofa_diff_threshold:
# if there was an increase >= 2, set sepsis-time to SOFA-increase time
sepsis_time = si_window['hour'].iloc[i]
df.loc[df.hour == sepsis_time, sepsis_col] = True
# if there was an increase >= 2, set sepsis-time to SI-time
#df.loc[df.hour == si_hour, sepsis_col] = True
break # break to outer for-loop
return df[sepsis_col]
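# Illustrative sketch (not part of the original pipeline; all values are made up): SOFA
# rises from a baseline of 1 to 3 two hours after the suspected infection at hour 2, an
# increase of >= 2 inside the window, so the hour of the increase (hour 4) is labelled.
def _example_sofa_increase():
    toy = pd.DataFrame({
        'hadm_id': [1] * 6,
        'hour': [0, 1, 2, 3, 4, 5],
        'suspicion_of_infection': [0, 0, 1, 0, 0, 0],
        'SOFA': [1, 1, 1, 1, 3, 3],
    })
    return _sepsis_sofa_diff(toy)  # True only at hour 4, where SOFA jumps by 2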
def calculate_sepsis(aggregated_df, hours_before_si=48, hours_after_si=24, task='sepsis3', consider_difference=True, decrease_baseline=True):
""" Calculates sepsis labels from hourly SOFA/SIRS and suspicion of infection.
Note:
Similar to other implementations, sepsis-3 is considered to happen if SOFA was >= 2 at any point in a window
around a suspicion of infection. Thus, it is not considered whether the SOFA increased or decreased from the start value.
Parameters:
- aggregated_df
- hours_before_si: how many hours previous to the SI-time to evaluate SOFA scores for.
- hours_after_si: how many hours after the SI-time to evaluate SOFA scores for.
- task: 'sepsis1' (SIRS-based) or 'sepsis3' (SOFA-based); selects whether SIRS or SOFA is used as the metric.
- consider_difference: if true, will use slower algorithm that considers increase in metric by 2 from baseline instead of any values >= 2.
Returns: aggregated_df with two additional columns:
- 'sepsis': a binary label indicating times of sepsis.
- 'sepsis_onset' a binary label only containing the first case of sepsis per-admission.
"""
if task == 'sepsis1':
metric = 'SIRS'
elif task == 'sepsis3':
metric = 'SOFA'
else:
raise ValueError("Task undefined: please choose between sepsis1 and sepsis3")
if not consider_difference:
max_sofa_last_x_hours = aggregated_df[['hadm_id', 'hour', metric]].groupby('hadm_id').rolling(on='hour', window=hours_before_si, min_periods=1)[metric].max()
indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=hours_after_si)
max_sofa_next_y_hours = aggregated_df[['hadm_id', 'hour', metric]].groupby('hadm_id').rolling(on='hour', window=indexer, min_periods=1)[metric].max()
df = aggregated_df[['hadm_id', 'hour', 'suspicion_of_infection']].set_index(['hadm_id', 'hour'])
df['max_sofa_last_x_hours'] = max_sofa_last_x_hours
df['max_sofa_next_y_hours'] = max_sofa_next_y_hours
df.reset_index(inplace=True)
sepsis = df['suspicion_of_infection'] & (df.max_sofa_last_x_hours >= 2)
sepsis |= df['suspicion_of_infection'] & (df.max_sofa_next_y_hours >= 2)
aggregated_df[task] = sepsis
else:
print("Computing sepsis")
start = time.time()
sepsis = aggregated_df[['hadm_id','hour','suspicion_of_infection',metric]].groupby("hadm_id").apply(_sepsis_sofa_diff, hours_after_si=hours_after_si,
hours_before_si=hours_before_si, metric=metric,
decrease_baseline=decrease_baseline)
sepsis.index = sepsis.index.get_level_values(1) #drop hadm_id index to have same index as aggregated_df
aggregated_df[task] = sepsis
print("Took", time.time()-start, "s")
# compute first point of sepsis3 per admission
sepsis_onset = aggregated_df.loc[sepsis == True, ['hadm_id', 'hour', task]].sort_values(['hour']).groupby('hadm_id').first()
sepsis_onset = sepsis_onset.rename(columns={task: task+"_onset"}).reset_index()
aggregated_df = aggregated_df.merge(sepsis_onset, on=['hadm_id','hour'], how='left')
aggregated_df[task+'_onset'].fillna(False, inplace=True)
return aggregated_df
def create_metadata_json(static_df, timeseries_df=None):
"""Creates metadata json from dataframes
"""
metadata_json = []
for _, row in tqdm(static_df.iterrows(), total=len(static_df)):
patient_json = {}
for key, value in row.items():
if type(value) is pd.Timestamp:
patient_json[key] = str(value)
else:
patient_json[key] = value
# add non-static features under the key 'hourly_values'
if timeseries_df is not None:
hourly_vals = timeseries_df.loc[timeseries_df['subject_id'] == row['subject_id']]
assert len(hourly_vals) > 0, "Zero rows found for patient"
patient_json['hourly_values'] = []
cols = hourly_vals.columns[~(hourly_vals.columns == 'subject_id')]
for _, hourly_row in hourly_vals.loc[:, cols].iterrows():
    patient_json['hourly_values'].append(dict(hourly_row.items()))
metadata_json.append(patient_json)
return metadata_json
MIMIC_IV_CSV_SUBFOLDER = {
'patients': 'core',
'admissions': 'core',
'chartevents': 'icu',
'icustays': 'icu',
'inputevents': 'icu',
'outputevents': 'icu',
'procedureevents': 'icu',
'labevents': 'hosp',
'microbiologyevents': 'hosp',
'prescriptions': 'hosp',
'diagnoses_icd': 'hosp',
}
MIMIC_IV_CSV_DTYPES = {
'patients': {
'subject_id': np.uint32,
'gender': str,
'anchor_age': np.uint32,
# vumi/persist/fields.py
# -*- test-case-name: vumi.persist.tests.test_fields -*-
"""Field types for Vumi's persistence models."""
from datetime import datetime
from vumi.message import VUMI_DATE_FORMAT
from vumi.utils import to_kwargs
class ValidationError(Exception):
"""Raised when a value assigned to a field is invalid."""
class FieldDescriptor(object):
"""Property for getting and setting fields."""
def __init__(self, key, field):
self.key = key
self.field = field
if self.field.index:
if self.field.index_name is None:
self.index_name = "%s_bin" % self.key
else:
self.index_name = field.index_name
else:
self.index_name = None
def setup(self, model_cls):
self.model_cls = model_cls
def validate(self, value):
self.field.validate(value)
def initialize(self, modelobj, value):
self.__set__(modelobj, value)
def _add_index(self, modelobj, value):
# XXX: The underlying libraries call str() on whatever index values we
# provide, so we do this explicitly here and special-case None.
#
# Index values in Riak have to be non-empty, so a zero-length string
# counts as "no value". Since we still have legacy data that was
# inadvertently indexed with "None" because of the str() call in the
# library and we still have legacy code that relies on an index search
# for a value of "None", fixing this properly here will break existing
# functionality. Once we have rewritten the offending code to not use
# "None" in the index, we can remove the hack below and be happier.
if value is None:
value = ''
# FIXME: We still rely on this being "None" in places. :-(
value = 'None'
modelobj._riak_object.add_index(self.index_name, str(value))
def set_value(self, modelobj, value):
"""Set the value associated with this descriptor."""
raw_value = self.field.to_riak(value)
modelobj._riak_object._data[self.key] = raw_value
if self.index_name is not None:
modelobj._riak_object.remove_index(self.index_name)
self._add_index(modelobj, raw_value)
def get_value(self, modelobj):
"""Get the value associated with this descriptor."""
raw_value = modelobj._riak_object._data.get(self.key)
return self.field.from_riak(raw_value)
def clean(self, modelobj):
"""Do any cleanup of the model data for this descriptor after loading
the data from Riak."""
pass
def __repr__(self):
return "<%s key=%s field=%r>" % (self.__class__.__name__, self.key,
self.field)
def __get__(self, instance, owner):
if instance is None:
return self.field
return self.get_value(instance)
def __set__(self, instance, value):
# instance can never be None here
self.validate(value)
self.set_value(instance, value)
class Field(object):
"""Base class for model attributes / fields.
:param object default:
Default value for the field. The default default is None.
:param boolean null:
Whether None is allowed as a value. Default is False (which
means the field must either be specified explicitly or by
a non-None default).
:param boolean index:
Whether the field should also be indexed. Default is False.
:param string index_name:
The name to use for the index. The default is the field name
followed by _bin.
"""
descriptor_class = FieldDescriptor
# whether an attempt should be made to initialize the field on
# model instance creation
initializable = True
def __init__(self, default=None, null=False, index=False, index_name=None):
self.default = default
self.null = null
self.index = index
self.index_name = index_name
def get_descriptor(self, key):
return self.descriptor_class(key, self)
def validate(self, value):
"""Validate a value.
Checks null values and calls .validate() for non-null
values. Raises ValidationError if a value is invalid.
"""
if not self.null and value is None:
raise ValidationError("None is not allowed as a value for non-null"
" fields.")
if value is not None:
self.custom_validate(value)
def custom_validate(self, value):
"""Check whether a non-null value is valid for this field."""
pass
def to_riak(self, value):
return self.custom_to_riak(value) if value is not None else None
def custom_to_riak(self, value):
"""Convert a non-None value to something storable by Riak."""
return value
def from_riak(self, raw_value):
return (self.custom_from_riak(raw_value)
if raw_value is not None else None)
def custom_from_riak(self, raw_value):
"""Convert a non-None value stored by Riak to Python."""
return raw_value
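# Illustrative sketch (not part of the original module): a hypothetical custom field
# built on the hooks above. Only custom_validate / custom_to_riak / custom_from_riak
# need to be overridden; None handling and the null check are done by the Field base
# class, and the descriptor machinery takes care of reading/writing the Riak object.
class SetOfStrings(Field):
    """Hypothetical field that stores a set of strings as a JSON-friendly list."""

    def custom_validate(self, value):
        if not isinstance(value, set):
            raise ValidationError("Value %r is not a set." % (value,))

    def custom_to_riak(self, value):
        return sorted(value)

    def custom_from_riak(self, raw_value):
        return set(raw_value)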
class Integer(Field):
"""Field that accepts integers.
Additional parameters:
:param integer min:
Minimum allowed value (default is `None` which indicates no minimum).
:param integer max:
Maximum allowed value (default is `None` which indicates no maximum).
"""
def __init__(self, min=None, max=None, **kw):
super(Integer, self).__init__(**kw)
self.min = min
self.max = max
def custom_validate(self, value):
if not isinstance(value, (int, long)):
raise ValidationError("Value %r is not an integer." % (value,))
if self.min is not None and value < self.min:
raise ValidationError("Value %r too low (minimum value is %d)."
% (value, self.min))
if self.max is not None and value > self.max:
raise ValidationError("Value %r too high (maximum value is %d)."
% (value, self.max))
class Boolean(Field):
"""Field that is either True or False.
"""
def custom_validate(self, value):
if not isinstance(value, bool):
raise ValidationError('Value %r is not a boolean.' % (value,))
class Unicode(Field):
"""Field that accepts unicode strings.
Additional parameters:
:param integer max_length:
Maximum allowed length (default is `None` which indicates no maximum).
"""
def __init__(self, max_length=None, **kw):
super(Unicode, self).__init__(**kw)
self.max_length = max_length
def custom_validate(self, value):
if not isinstance(value, unicode):
raise ValidationError("Value %r is not a unicode string."
% (value,))
if self.max_length is not None and len(value) > self.max_length:
raise ValidationError("Value %r too long (maximum length is %d)."
% (value, self.max_length))
class Tag(Field):
"""Field that represents a Vumi tag."""
def custom_validate(self, value):
if not isinstance(value, tuple) or len(value) != 2:
raise ValidationError("Tags %r should be a (pool, tag_name)"
" tuple" % (value,))
def custom_to_riak(self, value):
return list(value)
def custom_from_riak(self, value):
return tuple(value)
class Timestamp(Field):
"""Field that stores a datetime."""
def custom_validate(self, value):
if not isinstance(value, datetime):
raise ValidationError("Timestamp field expects a datetime.")
def custom_to_riak(self, value):
return value.strftime(VUMI_DATE_FORMAT)
def custom_from_riak(self, value):
return datetime.strptime(value, VUMI_DATE_FORMAT)
class Json(Field):
"""Field that stores an object that can be serialized to/from JSON."""
pass
class VumiMessageDescriptor(FieldDescriptor):
"""Property for getting and setting fields."""
def setup(self, model_cls):
super(VumiMessageDescriptor, self).setup(model_cls)
if self.field.prefix is None:
self.prefix = "%s." % self.key
else:
self.prefix = self.field.prefix
def _clear_keys(self, modelobj):
for key in modelobj._riak_object._data.keys():
if key.startswith(self.prefix):
del modelobj._riak_object._data[key]
def _timestamp_to_json(self, dt):
return dt.strftime(VUMI_DATE_FORMAT)
def _timestamp_from_json(self, value):
return datetime.strptime(value, VUMI_DATE_FORMAT)
def set_value(self, modelobj, msg):
"""Set the value associated with this descriptor."""
self._clear_keys(modelobj)
if msg is None:
return
for key, value in msg.payload.iteritems():
# TODO: timestamp as datetime in payload must die.
if key == "timestamp":
value = self._timestamp_to_json(value)
full_key = "%s%s" % (self.prefix, key)
modelobj._riak_object._data[full_key] = value
def get_value(self, modelobj):
"""Get the value associated with this descriptor."""
payload = {}
for key, value in modelobj._riak_object._data.iteritems():
if key.startswith(self.prefix):
key = key[len(self.prefix):]
# TODO: timestamp as datetime in payload must die.
if key == "timestamp":
value = self._timestamp_from_json(value)
payload[key] = value
if not payload:
return None
return self.field.message_class(**to_kwargs(payload))
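# Illustrative sketch (not part of the original module): the flattening scheme used by
# the descriptor above. A payload like {'to_addr': '+123', 'content': 'hi'} stored under
# a field named 'msg' ends up in the Riak object's data dict as {'msg.to_addr': '+123',
# 'msg.content': 'hi'}; get_value() reverses this by stripping the 'msg.' prefix. The
# helper below only mimics that round trip on a plain dict and is not used by the
# library code.
def _example_prefix_flattening(payload, prefix="msg."):
    flat = dict(("%s%s" % (prefix, key), value) for key, value in payload.items())
    recovered = dict((key[len(prefix):], value) for key, value in flat.items()
                     if key.startswith(prefix))
    return flat, recovered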
class VumiMessage(Field):
"""Field that represents a Vumi message.
Additional parameters:
:param class message_class:
The class of the message objects being stored.
Usually one of Message, TransportUserMessage or TransportEvent.
:param string prefix:
The prefix to use when storing message payload keys in Riak. Default is
the name of the field followed by a dot ('.').
"""
descriptor_class = VumiMessageDescriptor
def __init__(self, message_class, prefix=None, **kw):
super(VumiMessage, self).__init__(**kw)
self.message_class = message_class
self.prefix = prefix
def custom_validate(self, value):
if not isinstance(value, self.message_class):
raise ValidationError("Message %r should be an instance of %r"
% (value, self.message_class))
class FieldWithSubtype(Field):
"""Base class for a field that is a collection of other fields of a
single type.
:param Field field_type:
The field specification for the dynamic values. Default is Unicode().
"""
def __init__(self, field_type=None, **kw):
super(FieldWithSubtype, self).__init__(**kw)
if field_type is None:
field_type = Unicode()
if field_type.descriptor_class is not FieldDescriptor:
raise RuntimeError("Dynamic fields only supports fields that"
" that use the basic FieldDescriptor class")
self.field_type = field_type
def validate(self, value):
self.field_type.validate(value)
def to_riak(self, value):
return self.field_type.to_riak(value)
def from_riak(self, value):
return self.field_type.from_riak(value)
class DynamicDescriptor(FieldDescriptor):
"""A field descriptor for dynamic fields."""
def setup(self, model_cls):
super(DynamicDescriptor, self).setup(model_cls)
if self.field.prefix is None:
self.prefix = "%s." % self.key
else:
self.prefix = self.field.prefix
def initialize(self, modelobj, valuedict):
if valuedict is not None:
self.update(modelobj, valuedict)
def get_value(self, modelobj):
return DynamicProxy(self, modelobj)
def set_value(self, modelobj, value):
raise RuntimeError("DynamicDescriptors should never be assigned to.")
def iterkeys(self, modelobj):
prefix_len = len(self.prefix)
return (key[prefix_len:]
for key in modelobj._riak_object._data.iterkeys()
if key.startswith(self.prefix))
def iteritems(self, modelobj):
prefix_len = len(self.prefix)
from_riak = self.field.from_riak
return ((key[prefix_len:], from_riak(value))
for key, value in modelobj._riak_object._data.iteritems()
if key.startswith(self.prefix))
def update(self, modelobj, otherdict):
# this is a separate method so it can succeed or fail
# somewhat atomically in the case where otherdict contains
# bad keys or values
items = [(self.prefix + key, self.field.to_riak(value))
for key, value in otherdict.iteritems()]
#!/usr/bin/python
import argparse
import copy
import getpass
import io
import os
from ruamel.yaml import (YAML, YAMLError, )
import socket
import sys
import unittest
import fsl_sub.cmdline
from unittest.mock import patch
YAML_CONF = '''---
method: sge
modulecmd: /usr/bin/modulecmd
thread_control:
- OMP_NUM_THREADS
- MKL_NUM_THREADS
- MKL_DOMAIN_NUM_THREADS
- OPENBLAS_NUM_THREADS
- GOTO_NUM_THREADS
preserve_modules: True
export_vars: []
method_opts:
sge:
queues: True
large_job_split_pe: shmem
copy_environment: True
affinity_type: linear
affinity_control: threads
mail_support: True
mail_modes:
b:
- b
e:
- e
a:
- a
f:
- a
- e
- b
n:
- n
mail_mode: a
map_ram: True
ram_resources:
- m_mem_free
- h_vmem
job_priorities: True
min_priority: -1023
max_priority: 0
array_holds: True
array_limit: True
architecture: False
job_resources: True
projects: False
script_conf: True
coproc_opts:
cuda:
resource: gpu
classes: True
class_resource: gputype
class_types:
K:
resource: k80
doc: Kepler. ECC, double- or single-precision workloads
capability: 2
P:
resource: p100
doc: >
Pascal. ECC, double-, single- and half-precision
workloads
capability: 3
default_class: K
include_more_capable: True
uses_modules: True
no_binding: True
module_parent: cuda
queues:
gpu.q:
time: 18000
max_size: 250
slot_size: 64
max_slots: 20
copros:
cuda:
max_quantity: 4
classes:
- K
- P
- V
map_ram: true
parallel_envs:
- shmem
priority: 1
group: 0
default: true
a.qa,a.qb,a.qc:
time: 1440
max_size: 160
slot_size: 4
max_slots: 16
map_ram: true
parallel_envs:
- shmem
priority: 3
group: 1
default: true
a.qa,a.qc:
time: 1440
max_size: 240
slot_size: 16
max_slots: 16
map_ram: true
parallel_envs:
- shmem
priority: 2
group: 1
default: true
a.qc:
time: 1440
max_size: 368
slot_size: 16
max_slots: 24
map_ram: true
parallel_envs:
- shmem
priority: 1
group: 1
default: true
b.qa,b.qb,b.qc:
time: 10080
max_size: 160
slot_size: 4
max_slots: 16
map_ram: true
parallel_envs:
- shmem
priority: 3
group: 2
b.qa,b.qc:
time: 10080
max_size: 240
slot_size: 16
max_slots: 16
map_ram: true
parallel_envs:
- shmem
priority: 2
group: 2
b.qc:
time: 10080
max_size: 368
slot_size: 16
max_slots: 24
map_ram: true
parallel_envs:
- shmem
priority: 1
group: 2
t.q:
time: 10080
max_size: 368
slot_size: 16
max_slots: 24
map_ram: true
parallel_envs:
- specialpe
priority: 1
group: 2
default_queues:
- a.qa,a,qb,a.qc
- a.qa,a.qc
- a.qc
'''
YAML_CONF_PROJECTS = '''---
method: sge
modulecmd: /usr/bin/modulecmd
thread_control:
- OMP_NUM_THREADS
- MKL_NUM_THREADS
- MKL_DOMAIN_NUM_THREADS
- OPENBLAS_NUM_THREADS
- GOTO_NUM_THREADS
preserve_modules: True
export_vars: []
method_opts:
sge:
queues: True
large_job_split_pe: shmem
copy_environment: True
affinity_type: linear
affinity_control: threads
mail_support: True
mail_modes:
b:
- b
e:
- e
a:
- a
f:
- a
- e
- b
n:
- n
mail_mode: a
map_ram: True
ram_resources:
- m_mem_free
- h_vmem
job_priorities: True
min_priority: -1023
max_priority: 0
array_holds: True
array_limit: True
architecture: False
job_resources: True
projects: True
script_conf: True
coproc_opts:
cuda:
resource: gpu
classes: True
class_resource: gputype
class_types:
K:
resource: k80
doc: Kepler. ECC, double- or single-precision workloads
capability: 2
P:
resource: p100
doc: >
Pascal. ECC, double-, single- and half-precision
workloads
capability: 3
default_class: K
include_more_capable: True
no_binding: True
uses_modules: True
module_parent: cuda
queues:
gpu.q:
time: 18000
max_size: 250
slot_size: 64
max_slots: 20
copros:
cuda:
max_quantity: 4
classes:
- K
- P
- V
map_ram: true
parallel_envs:
- shmem
priority: 1
group: 0
default: true
a.qa,a.qb,a.qc:
time: 1440
max_size: 160
slot_size: 4
max_slots: 16
map_ram: true
parallel_envs:
- shmem
priority: 3
group: 1
default: true
a.qa,a.qc:
time: 1440
max_size: 240
slot_size: 16
max_slots: 16
map_ram: true
parallel_envs:
- shmem
priority: 2
group: 1
default: true
a.qc:
time: 1440
max_size: 368
slot_size: 16
max_slots: 24
map_ram: true
parallel_envs:
- shmem
priority: 1
group: 1
default: true
b.qa,b.qb,b.qc:
time: 10080
max_size: 160
slot_size: 4
max_slots: 16
map_ram: true
parallel_envs:
- shmem
priority: 3
group: 2
b.qa,b.qc:
time: 10080
max_size: 240
slot_size: 16
max_slots: 16
map_ram: true
parallel_envs:
- shmem
priority: 2
group: 2
b.qc:
time: 10080
max_size: 368
slot_size: 16
max_slots: 24
map_ram: true
parallel_envs:
- shmem
priority: 1
group: 2
t.q:
time: 10080
max_size: 368
slot_size: 16
max_slots: 24
map_ram: true
parallel_envs:
- specialpe
priority: 1
group: 2
default_queues:
- a.qa,a,qb,a.qc
- a.qa,a.qc
- a.qc
'''
USER_EMAIL = "{username}@{hostname}".format(
username=getpass.getuser(),
hostname=socket.gethostname()
)
class FakePlugin(object):
def submit(self):
pass
def qtest(self):
pass
def queue_exists(self):
pass
def plugin_version(self):
return '1.2.0'
def already_queued(self):
return False
class TestMisc(unittest.TestCase):
def test_titlize_key(self):
self.assertEqual(
'A Word',
fsl_sub.utils.titlize_key(
'a_word'
)
)
def test_blank_none(self):
self.assertEqual(
fsl_sub.utils.blank_none(1),
'1'
)
self.assertEqual(
fsl_sub.utils.blank_none(None),
''
)
self.assertEqual(
fsl_sub.utils.blank_none('A'),
'A'
)
self.assertEqual(
fsl_sub.utils.blank_none(['a', 'b']),
"['a', 'b']"
)
@patch(
'fsl_sub.cmdline.load_plugins',
autospec=True,
return_value={'fsl_sub_plugin_sge': FakePlugin()}
)
@patch(
'fsl_sub.shell_modules.read_config',
autospec=True,
return_value=YAML(typ='safe').load(YAML_CONF))
@patch(
'fsl_sub.cmdline.read_config',
autospec=True,
return_value=YAML(typ='safe').load(YAML_CONF))
@patch(
'fsl_sub.config.read_config',
autospec=True,
return_value=YAML(typ='safe').load(YAML_CONF))
@patch(
'fsl_sub.cmdline.submit',
autospec=True,
return_value=123)
@patch(
'fsl_sub.cmdline.get_modules', autospec=True,
return_value=['7.5', '8.0', ])
@patch(
'fsl_sub.coprocessors.get_modules',
autospec=True, return_value=['7.5', '8.0', ])
class TestMain(unittest.TestCase):
def setUp(self):
self.yaml = YAML(typ='safe')
self.base_args = {
'architecture': None,
'array_hold': None,
'array_limit': None,
'array_specifier': None,
'array_task': False,
'coprocessor': None,
'coprocessor_toolkit': None,
'export_vars': [],
'coprocessor_class': None,
'coprocessor_class_strict': False,
'coprocessor_multi': 1,
'name': None,
'parallel_env': None,
'queue': None,
'threads': 1,
'jobhold': None,
'jobram': None,
'jobtime': None,
'keep_jobscript': False,
'logdir': None,
'mail_on': None,
'mailto': USER_EMAIL,
'priority': None,
'ramsplit': True,
'requeueable': True,
'resources': None,
'usescript': False,
'validate_command': True,
'as_tuple': False,
'project': None
}
def test_noramsplit(self, *args):
with io.StringIO() as text_trap:
sys.stdout = text_trap
fsl_sub.cmdline.main(['--noramsplit', '1', '2', ])
sys.stdout = sys.__stdout__
self.assertEqual(
text_trap.getvalue(),
'123\n'
)
test_args = copy.deepcopy(self.base_args)
test_args['ramsplit'] = False
args[2].assert_called_with(
['1', '2', ],
**test_args
)
def test_parallelenv(self, *args):
with io.StringIO() as text_trap:
sys.stdout = text_trap
fsl_sub.cmdline.main(['--parallelenv', 'shmem,2', '1', '2', ])
sys.stdout = sys.__stdout__
self.assertEqual(
text_trap.getvalue(),
'123\n'
)
test_args = copy.deepcopy(self.base_args)
test_args['parallel_env'] = 'shmem'
test_args['threads'] = 2
args[2].assert_called_with(
['1', '2', ],
**test_args
)
args[2].reset_mock()
with io.StringIO() as text_trap:
sys.stdout = text_trap
fsl_sub.cmdline.main(['-s', 'shmem,2', '1', '2', ])
sys.stdout = sys.__stdout__
self.assertEqual(
text_trap.getvalue(),
'123\n'
)
test_args = copy.deepcopy(self.base_args)
test_args['parallel_env'] = 'shmem'
test_args['threads'] = 2
args[2].assert_called_with(
['1', '2', ],
**test_args
)
def test_mailoptions(self, *args):
with io.StringIO() as text_trap:
sys.stdout = text_trap
fsl_sub.cmdline.main(['--mailoptions', 'n', '1', '2', ])
sys.stdout = sys.__stdout__
self.assertEqual(
text_trap.getvalue(),
'123\n'
)
test_args = copy.deepcopy(self.base_args)
test_args['mail_on'] = 'n'
args[2].assert_called_with(
['1', '2', ],
**test_args
)
def test_mailto(self, *args):
mailto = '<EMAIL>'
with io.StringIO() as text_trap:
sys.stdout = text_trap
fsl_sub.cmdline.main(['--mailto', mailto, '1', '2', ])
sys.stdout = sys.__stdout__
self.assertEqual(
text_trap.getvalue(),
'123\n'
)
test_args = copy.deepcopy(self.base_args)
test_args['mailto'] = mailto
args[2].assert_called_with(
['1', '2', ],
**test_args
)
args[2].reset_mock()
with io.StringIO() as text_trap:
sys.stdout = text_trap
fsl_sub.cmdline.main(['-M', mailto, '1', '2', ])
sys.stdout = sys.__stdout__
self.assertEqual(
text_trap.getvalue(),
'123\n'
)
args[2].assert_called_with(
['1', '2', ],
**test_args
)
def test_array_task(self, *args):
with io.StringIO() as text_trap:
sys.stdout = text_trap
fsl_sub.cmdline.main(['--array_task', 'taskfile', ])
sys.stdout = sys.__stdout__
self.assertEqual(
text_trap.getvalue(),
'123\n'
)
test_args = copy.deepcopy(self.base_args)
test_args['array_task'] = True
args[2].assert_called_with(
'taskfile',
**test_args
)
args[2].reset_mock()
with io.StringIO() as text_trap:
sys.stdout = text_trap
fsl_sub.cmdline.main(['-t', 'taskfile', ])
sys.stdout = sys.__stdout__
self.assertEqual(
text_trap.getvalue(),
'123\n'
)
args[2].assert_called_with(
'taskfile',
**test_args
)
def test_array_limit(self, *args):
with io.StringIO() as text_trap:
sys.stdout = text_trap
fsl_sub.cmdline.main(
['--array_task', 'commandfile', '--array_limit', '2', ])
sys.stdout = sys.__stdout__
self.assertEqual(
text_trap.getvalue(),
'123\n'
)
test_args = copy.deepcopy(self.base_args)
test_args['array_task'] = True
test_args['array_limit'] = 2
args[2].assert_called_with(
'commandfile',
**test_args
)
args[2].reset_mock()
with io.StringIO() as text_trap:
sys.stdout = text_trap
fsl_sub.cmdline.main(['-x', '2', '--array_task', 'commandfile', ])
sys.stdout = sys.__stdout__
self.assertEqual(
text_trap.getvalue(),
'123\n'
)
test_args = copy.deepcopy(self.base_args)
test_args['array_task'] = True
test_args['array_limit'] = 2
args[2].assert_called_with(
'commandfile',
**test_args
)
def test_array_hold(self, *args):
hold_id = '20002'
with io.StringIO() as text_trap:
sys.stdout = text_trap
fsl_sub.cmdline.main(
['--array_task', 'commandfile', '--array_hold', hold_id, ])
sys.stdout = sys.__stdout__
self.assertEqual(
text_trap.getvalue(),
'123\n'
)
test_args = copy.deepcopy(self.base_args)
test_args['array_task'] = True
test_args['array_hold'] = [hold_id, ]
args[2].assert_called_with(
'commandfile',
**test_args
)
def test_job_hold(self, *args):
hold_id = '20002'
with io.StringIO() as text_trap:
sys.stdout = text_trap
fsl_sub.cmdline.main(
['--jobhold', hold_id, 'commandfile'])
sys.stdout = sys.__stdout__
self.assertEqual(
text_trap.getvalue(),
'123\n'
)
test_args = copy.deepcopy(self.base_args)
test_args['jobhold'] = [hold_id, ]
args[2].assert_called_with(
['commandfile'],
**test_args
)
def test_array_native(self, *args):
array_desc = '1-4:2'
with io.StringIO() as text_trap:
sys.stdout = text_trap
fsl_sub.cmdline.main(
['--array_native', array_desc, 'command', ])
sys.stdout = sys.__stdout__
self.assertEqual(
text_trap.getvalue(),
'123\n'
)
test_args = copy.deepcopy(self.base_args)
test_args['array_task'] = True
test_args['array_specifier'] = array_desc
args[2].assert_called_with(
['command'],
**test_args
)
def test_coprocessor(self, *args):
with io.StringIO() as text_trap:
sys.stdout = text_trap
fsl_sub.cmdline.main(['--coprocessor', 'cuda', '1', '2', ])
sys.stdout = sys.__stdout__
self.assertEqual(
text_trap.getvalue(),
'123\n'
)
test_args = copy.deepcopy(self.base_args)
test_args['coprocessor'] = 'cuda'
args[2].assert_called_with(
['1', '2', ],
**test_args
)
def test_coprocessor_toolkit(self, *args):
with io.StringIO() as text_trap:
sys.stdout = text_trap
fsl_sub.cmdline.main([
'--coprocessor', 'cuda',
'--coprocessor_toolkit', '7.5',
'1', '2', ])
sys.stdout = sys.__stdout__
self.assertEqual(
text_trap.getvalue(),
'123\n'
)
test_args = copy.deepcopy(self.base_args)
test_args['coprocessor'] = 'cuda'
test_args['coprocessor_toolkit'] = '7.5'
args[2].assert_called_with(
['1', '2', ],
**test_args
)
def test_coprocessor_class(self, *args):
with io.StringIO() as text_trap:
sys.stdout = text_trap
fsl_sub.cmdline.main([
'--coprocessor', 'cuda',
'--coprocessor_class', 'K',
'1', '2', ])
sys.stdout = sys.__stdout__
self.assertEqual(
text_trap.getvalue(),
'123\n'
)
if image_base:
os.environ['ALRB_CONT_UNPACKEDDIR'] = image_base
paths = [os.path.join(image_base, os.path.basename(self.imagename)),
os.path.join(image_base, self.imagename)]
local_path = get_valid_path_from_list(paths)
if local_path:
self.imagename = local_path
#if image_base and not os.path.isabs(self.imagename) and not self.imagename.startswith('docker'):
# self.imagename = os.path.join(image_base, self.imagename)
def prepare_infiles(self, data):
"""
Construct FileSpec objects for input files from raw dict `data`
:return: list of validated `FileSpec` objects
"""
# direct access handling
self.set_accessmode()
access_keys = ['allow_lan', 'allow_wan', 'direct_access_lan', 'direct_access_wan']
if not self.infosys or not self.infosys.queuedata:
self.show_access_settings(access_keys)
# form raw list data from input comma-separated values for further validation by FileSpec
kmap = self.get_kmap()
try:
ksources = dict([k, self.clean_listdata(data.get(k, ''), list, k, [])] for k in list(kmap.values())) # Python 3
except Exception:
ksources = dict([k, self.clean_listdata(data.get(k, ''), list, k, [])] for k in kmap.itervalues()) # Python 2
ret, lfns = [], set()
for ind, lfn in enumerate(ksources.get('inFiles', [])):
if lfn in ['', 'NULL'] or lfn in lfns: # exclude null data and duplicates
continue
lfns.add(lfn)
idat = {}
try:
for attrname, k in list(kmap.items()): # Python 3
idat[attrname] = ksources[k][ind] if len(ksources[k]) > ind else None
except Exception:
for attrname, k in kmap.iteritems(): # Python 2
idat[attrname] = ksources[k][ind] if len(ksources[k]) > ind else None
accessmode = 'copy' ## default settings
# for prod jobs: use remoteio if transferType=direct and prodDBlockToken!=local
# for analy jobs: use remoteio if prodDBlockToken!=local
if (self.is_analysis() or self.transfertype == 'direct') and idat.get('storage_token') != 'local': ## Job settings
accessmode = 'direct'
if self.accessmode: ## Job input options (job params) overwrite any other settings
accessmode = self.accessmode
idat['accessmode'] = accessmode
# init access setting from queuedata
if self.infosys and self.infosys.queuedata:
for key in access_keys:
idat[key] = getattr(self.infosys.queuedata, key)
finfo = FileSpec(filetype='input', **idat)
logger.info('added file \'%s\' with accessmode \'%s\'' % (lfn, accessmode))
ret.append(finfo)
return ret
def set_accessmode(self):
"""
Set the accessmode field using jobparams.
:return:
"""
self.accessmode = None
if '--accessmode=direct' in self.jobparams:
self.accessmode = 'direct'
if '--accessmode=copy' in self.jobparams or '--useLocalIO' in self.jobparams:
self.accessmode = 'copy'
@staticmethod
def show_access_settings(access_keys):
"""
Show access settings for the case where job.infosys.queuedata is not initialized.
:param access_keys: list of access keys (list).
:return:
"""
dat = dict([k, getattr(FileSpec, k, None)] for k in access_keys)
try:
msg = ', '.join(["%s=%s" % (k, v) for k, v in sorted(dat.iteritems())]) # Python 2
except Exception:
msg = ', '.join(["%s=%s" % (k, v) for k, v in sorted(dat.items())]) # Python 3
logger.info('job.infosys.queuedata is not initialized: the following access settings will be used by default: %s' % msg)
@staticmethod
def get_kmap():
"""
Return the kmap dictionary for server data to pilot conversions.
:return: kmap (dict).
"""
kmap = {
# 'internal_name': 'ext_key_structure'
'lfn': 'inFiles',
##'??': 'dispatchDblock', '??define_proper_internal_name': 'dispatchDBlockToken',
'dataset': 'realDatasetsIn', 'guid': 'GUID',
'filesize': 'fsize', 'checksum': 'checksum', 'scope': 'scopeIn',
##'??define_internal_key': 'prodDBlocks',
'storage_token': 'prodDBlockToken',
'ddmendpoint': 'ddmEndPointIn',
}
return kmap
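    # Illustrative example (assumed values, not taken from the source): given raw job
    # data such as
    #   {'inFiles': 'a.root,b.root', 'scopeIn': 'mc16,mc16', 'prodDBlockToken': 'local,NULL'}
    # the kmap above makes prepare_infiles() build, for file index 0,
    #   {'lfn': 'a.root', 'scope': 'mc16', 'storage_token': 'local', ...}
    # which is then passed to FileSpec(filetype='input', **idat).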
def prepare_outfiles(self, data):
"""
Construct validated FileSpec objects for output and log files from raw dict `data`
Note: final preparation for output files can only be done after the payload has finished in case the payload
has produced a job report with e.g. output file guids. This is verified in
pilot/control/payload/process_job_report().
:param data:
:return: (list of `FileSpec` for output, list of `FileSpec` for log)
"""
# form raw list data from input comma-separated values for further validation by FileSpec
kmap = {
# 'internal_name': 'ext_key_structure'
'lfn': 'outFiles',
##'??': 'destinationDblock', '??define_proper_internal_name': 'destinationDBlockToken',
'dataset': 'realDatasets', 'scope': 'scopeOut',
##'??define_internal_key':'prodDBlocks', '??':'dispatchDBlockTokenForOut',
'ddmendpoint': 'ddmEndPointOut',
}
try:
ksources = dict([k, self.clean_listdata(data.get(k, ''), list, k, [])] for k in list(kmap.values())) # Python 3
except Exception:
ksources = dict([k, self.clean_listdata(data.get(k, ''), list, k, [])] for k in kmap.itervalues()) # Python 2
# take the logfile name from the environment first (in case of raythena and aborted pilots)
pilot_logfile = os.environ.get('PILOT_LOGFILE', None)
if pilot_logfile:
# update the data with the new name
old_logfile = data.get('logFile')
data['logFile'] = pilot_logfile
# note: the logFile also appears in the outFiles list
outfiles = ksources.get('outFiles', None)
if outfiles and old_logfile in outfiles:
# search and replace the old logfile name with the new from the environment
ksources['outFiles'] = [pilot_logfile if x == old_logfile else x for x in ksources.get('outFiles')]
log_lfn = data.get('logFile')
if log_lfn:
# unify scopeOut structure: add scope of log file
scope_out = []
for lfn in ksources.get('outFiles', []):
if lfn == log_lfn:
scope_out.append(data.get('scopeLog'))
else:
if not ksources['scopeOut']:
raise Exception('Failed to extract scopeOut parameter from Job structure sent by Panda, please check input format!')
scope_out.append(ksources['scopeOut'].pop(0))
ksources['scopeOut'] = scope_out
return self._get_all_output(ksources, kmap, log_lfn, data)
def _get_all_output(self, ksources, kmap, log_lfn, data):
"""
Create lists of FileSpecs for output + log files.
Helper function for prepare_output().
:param ksources:
:param kmap:
:param log_lfn: log file name (string).
:param data:
:return: ret_output (list of FileSpec), ret_log (list of FileSpec)
"""
ret_output, ret_log = [], []
lfns = set()
for ind, lfn in enumerate(ksources['outFiles']):
if lfn in ['', 'NULL'] or lfn in lfns: # exclude null data and duplicates
continue
lfns.add(lfn)
idat = {}
try:
for attrname, k in list(kmap.items()): # Python 3
idat[attrname] = ksources[k][ind] if len(ksources[k]) > ind else None
except Exception:
for attrname, k in kmap.iteritems(): # Python 2
idat[attrname] = ksources[k][ind] if len(ksources[k]) > ind else None
ftype = 'output'
ret = ret_output
if lfn == log_lfn: # log file case
ftype = 'log'
idat['guid'] = data.get('logGUID')
ret = ret_log
elif lfn.endswith('.lib.tgz'): # build job case, generate a guid for the lib file
idat['guid'] = get_guid()
finfo = FileSpec(filetype=ftype, **idat)
ret.append(finfo)
return ret_output, ret_log
def __getitem__(self, key):
"""
Temporary Integration function to keep dict-based access for old logic in compatible way
TO BE REMOVED ONCE all fields will be moved to Job object attributes
"""
if key == 'infosys':
return self.infosys
#if hasattr(self, key):
# return getattr(self, key)
return self._rawdata[key]
def __setitem__(self, key, val):
"""
Temporary Integration function to keep dict-based access for old logic in compatible way
TO BE REMOVED ONCE all fields will be moved to Job object attributes
"""
self._rawdata[key] = val
def __contains__(self, key):
"""
Temporary Integration function to keep dict-based access for old logic in compatible way
TO BE REMOVED ONCE all fields will be moved to Job object attributes
"""
return key in self._rawdata
def get(self, key, defval=None):
"""
Temporary Integration function to keep dict-based access for old logic in compatible way
TO BE REMOVED ONCE all fields will be moved to Job object attributes
"""
return self._rawdata.get(key, defval)
def load(self, data, use_kmap=True):
"""
Construct and initialize data from ext source
:param data: input dictionary of job data settings
"""
## the translation map of the container attributes from external data to internal schema
## 'internal_name':('ext_name1', 'extname2_if_any')
## 'internal_name2':'ext_name3'
## first defined ext field will be used
## if key is not explicitly specified then ext name will be used as is
## fix me later to proper internal names if need
kmap = {
'jobid': 'PandaID',
'taskid': 'taskID',
'jobparams': 'jobPars',
'corecount': 'coreCount',
'platform': 'cmtConfig',
'infilesguids': 'GUID', ## TO BE DEPRECATED: moved to FileSpec
'attemptnr': 'attemptNr',
'datasetin': 'realDatasetsIn', ## TO BE DEPRECATED: moved to FileSpec
'processingtype': 'processingType',
'transfertype': 'transferType',
'destinationdblock': 'destinationDblock',
'noexecstrcnv': 'noExecStrCnv',
'swrelease': 'swRelease',
'jobsetid': 'jobsetID',
'produserid': 'prodUserID',
'jobdefinitionid': 'jobDefinitionID',
'writetofile': 'writeToFile',
'is_eventservice': 'eventService',
'is_eventservicemerge': 'eventServiceMerge',
'is_hpo': 'isHPO',
'use_vp': 'useVP',
'maxcpucount': 'maxCpuCount',
'allownooutput': 'allowNoOutput',
'imagename_jobdef': 'container_name',
'containeroptions': 'containerOptions',
'looping_check': 'loopingCheck'
} if use_kmap else {}
self._load_data(data, kmap)
def is_analysis(self): ## if it's experiment specific logic then it could be isolated into extended JobDataATLAS class
"""
Determine whether the job is an analysis user job or not.
:return: True in case of user analysis job
"""
is_analysis = self.transformation.startswith('https://') or self.transformation.startswith('http://')
# apply addons checks later if need
return is_analysis
def is_build_job(self):
"""
Check if the job is a build job.
(i.e. check if the job has an output file that is a lib file).
:return: boolean
"""
for fspec in self.outdata:
if '.lib.' in fspec.lfn and '.log.' not in fspec.lfn:
return True
return False
def is_local(self): ## confusing function, since it does
return _gui.Gui_class_info()
Gui_class_info = _gui.Gui_class_info
def Gui____class_destructor__(instance, is_array):
return _gui.Gui____class_destructor__(instance, is_array)
Gui____class_destructor__ = _gui.Gui____class_destructor__
class GuiResource(base.CoreBaseType):
__swig_setmethods__ = {}
for _s in [base.CoreBaseType]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, GuiResource, name, value)
__swig_getmethods__ = {}
for _s in [base.CoreBaseType]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, GuiResource, name)
__repr__ = _swig_repr
if _newclass:
up_triangle_label = staticmethod(_gui.GuiResource_up_triangle_label)
else:
up_triangle_label = _gui.GuiResource_up_triangle_label
if _newclass:
left_triangle_label = staticmethod(_gui.GuiResource_left_triangle_label)
else:
left_triangle_label = _gui.GuiResource_left_triangle_label
if _newclass:
down_triangle_label = staticmethod(_gui.GuiResource_down_triangle_label)
else:
down_triangle_label = _gui.GuiResource_down_triangle_label
if _newclass:
right_triangle_label = staticmethod(_gui.GuiResource_right_triangle_label)
else:
right_triangle_label = _gui.GuiResource_right_triangle_label
if _newclass:
up_triangle_label_selected = staticmethod(_gui.GuiResource_up_triangle_label_selected)
else:
up_triangle_label_selected = _gui.GuiResource_up_triangle_label_selected
if _newclass:
left_triangle_label_selected = staticmethod(_gui.GuiResource_left_triangle_label_selected)
else:
left_triangle_label_selected = _gui.GuiResource_left_triangle_label_selected
if _newclass:
down_triangle_label_selected = staticmethod(_gui.GuiResource_down_triangle_label_selected)
else:
down_triangle_label_selected = _gui.GuiResource_down_triangle_label_selected
if _newclass:
right_triangle_label_selected = staticmethod(_gui.GuiResource_right_triangle_label_selected)
else:
right_triangle_label_selected = _gui.GuiResource_right_triangle_label_selected
if _newclass:
up_triangle_text = staticmethod(_gui.GuiResource_up_triangle_text)
else:
up_triangle_text = _gui.GuiResource_up_triangle_text
if _newclass:
left_triangle_text = staticmethod(_gui.GuiResource_left_triangle_text)
else:
left_triangle_text = _gui.GuiResource_left_triangle_text
if _newclass:
down_triangle_text = staticmethod(_gui.GuiResource_down_triangle_text)
else:
down_triangle_text = _gui.GuiResource_down_triangle_text
if _newclass:
right_triangle_text = staticmethod(_gui.GuiResource_right_triangle_text)
else:
right_triangle_text = _gui.GuiResource_right_triangle_text
if _newclass:
small_up_triangle = staticmethod(_gui.GuiResource_small_up_triangle)
else:
small_up_triangle = _gui.GuiResource_small_up_triangle
if _newclass:
small_left_triangle = staticmethod(_gui.GuiResource_small_left_triangle)
else:
small_left_triangle = _gui.GuiResource_small_left_triangle
if _newclass:
small_down_triangle = staticmethod(_gui.GuiResource_small_down_triangle)
else:
small_down_triangle = _gui.GuiResource_small_down_triangle
if _newclass:
small_right_triangle = staticmethod(_gui.GuiResource_small_right_triangle)
else:
small_right_triangle = _gui.GuiResource_small_right_triangle
if _newclass:
checker = staticmethod(_gui.GuiResource_checker)
else:
checker = _gui.GuiResource_checker
if _newclass:
checker_selected = staticmethod(_gui.GuiResource_checker_selected)
else:
checker_selected = _gui.GuiResource_checker_selected
if _newclass:
radio_menu = staticmethod(_gui.GuiResource_radio_menu)
else:
radio_menu = _gui.GuiResource_radio_menu
if _newclass:
radio_menu_selected = staticmethod(_gui.GuiResource_radio_menu_selected)
else:
radio_menu_selected = _gui.GuiResource_radio_menu_selected
if _newclass:
radio_menu_empty = staticmethod(_gui.GuiResource_radio_menu_empty)
else:
radio_menu_empty = _gui.GuiResource_radio_menu_empty
if _newclass:
radio_menu_empty_selected = staticmethod(_gui.GuiResource_radio_menu_empty_selected)
else:
radio_menu_empty_selected = _gui.GuiResource_radio_menu_empty_selected
if _newclass:
horizontal_minislider = staticmethod(_gui.GuiResource_horizontal_minislider)
else:
horizontal_minislider = _gui.GuiResource_horizontal_minislider
if _newclass:
horizontal_minislider_selected = staticmethod(_gui.GuiResource_horizontal_minislider_selected)
else:
horizontal_minislider_selected = _gui.GuiResource_horizontal_minislider_selected
if _newclass:
viewport_close = staticmethod(_gui.GuiResource_viewport_close)
else:
viewport_close = _gui.GuiResource_viewport_close
if _newclass:
viewport_split_horizontal = staticmethod(_gui.GuiResource_viewport_split_horizontal)
else:
viewport_split_horizontal = _gui.GuiResource_viewport_split_horizontal
if _newclass:
viewport_split_vertical = staticmethod(_gui.GuiResource_viewport_split_vertical)
else:
viewport_split_vertical = _gui.GuiResource_viewport_split_vertical
if _newclass:
viewport_split_quad = staticmethod(_gui.GuiResource_viewport_split_quad)
else:
viewport_split_quad = _gui.GuiResource_viewport_split_quad
if _newclass:
viewport_maximize = staticmethod(_gui.GuiResource_viewport_maximize)
else:
viewport_maximize = _gui.GuiResource_viewport_maximize
if _newclass:
viewport_restore = staticmethod(_gui.GuiResource_viewport_restore)
else:
viewport_restore = _gui.GuiResource_viewport_restore
if _newclass:
left_navigator_arrow = staticmethod(_gui.GuiResource_left_navigator_arrow)
else:
left_navigator_arrow = _gui.GuiResource_left_navigator_arrow
if _newclass:
right_navigator_arrow = staticmethod(_gui.GuiResource_right_navigator_arrow)
else:
right_navigator_arrow = _gui.GuiResource_right_navigator_arrow
if _newclass:
navigator_lock = staticmethod(_gui.GuiResource_navigator_lock)
else:
navigator_lock = _gui.GuiResource_navigator_lock
if _newclass:
navigator_refresh = staticmethod(_gui.GuiResource_navigator_refresh)
else:
navigator_refresh = _gui.GuiResource_navigator_refresh
if _newclass:
clear = staticmethod(_gui.GuiResource_clear)
else:
clear = _gui.GuiResource_clear
if _newclass:
trash = staticmethod(_gui.GuiResource_trash)
else:
trash = _gui.GuiResource_trash
if _newclass:
magnifier = staticmethod(_gui.GuiResource_magnifier)
else:
magnifier = _gui.GuiResource_magnifier
if _newclass:
open = staticmethod(_gui.GuiResource_open)
else:
open = _gui.GuiResource_open
if _newclass:
save = staticmethod(_gui.GuiResource_save)
else:
save = _gui.GuiResource_save
if _newclass:
mb_info = staticmethod(_gui.GuiResource_mb_info)
else:
mb_info = _gui.GuiResource_mb_info
if _newclass:
up_triangle_label_iconrc = staticmethod(_gui.GuiResource_up_triangle_label_iconrc)
else:
up_triangle_label_iconrc = _gui.GuiResource_up_triangle_label_iconrc
if _newclass:
left_triangle_label_iconrc = staticmethod(_gui.GuiResource_left_triangle_label_iconrc)
else:
left_triangle_label_iconrc = _gui.GuiResource_left_triangle_label_iconrc
if _newclass:
down_triangle_label_iconrc = staticmethod(_gui.GuiResource_down_triangle_label_iconrc)
else:
down_triangle_label_iconrc = _gui.GuiResource_down_triangle_label_iconrc
if _newclass:
right_triangle_label_iconrc = staticmethod(_gui.GuiResource_right_triangle_label_iconrc)
else:
right_triangle_label_iconrc = _gui.GuiResource_right_triangle_label_iconrc
if _newclass:
up_triangle_label_selected_iconrc = staticmethod(_gui.GuiResource_up_triangle_label_selected_iconrc)
else:
up_triangle_label_selected_iconrc = _gui.GuiResource_up_triangle_label_selected_iconrc
if _newclass:
left_triangle_label_selected_iconrc = staticmethod(_gui.GuiResource_left_triangle_label_selected_iconrc)
else:
left_triangle_label_selected_iconrc = _gui.GuiResource_left_triangle_label_selected_iconrc
if _newclass:
down_triangle_label_selected_iconrc = staticmethod(_gui.GuiResource_down_triangle_label_selected_iconrc)
else:
down_triangle_label_selected_iconrc = _gui.GuiResource_down_triangle_label_selected_iconrc
if _newclass:
right_triangle_label_selected_iconrc = staticmethod(_gui.GuiResource_right_triangle_label_selected_iconrc)
else:
right_triangle_label_selected_iconrc = _gui.GuiResource_right_triangle_label_selected_iconrc
if _newclass:
up_triangle_text_iconrc = staticmethod(_gui.GuiResource_up_triangle_text_iconrc)
else:
up_triangle_text_iconrc = _gui.GuiResource_up_triangle_text_iconrc
if _newclass:
left_triangle_text_iconrc = staticmethod(_gui.GuiResource_left_triangle_text_iconrc)
else:
left_triangle_text_iconrc = _gui.GuiResource_left_triangle_text_iconrc
if _newclass:
down_triangle_text_iconrc = staticmethod(_gui.GuiResource_down_triangle_text_iconrc)
else:
down_triangle_text_iconrc = _gui.GuiResource_down_triangle_text_iconrc
if _newclass:
right_triangle_text_iconrc = staticmethod(_gui.GuiResource_right_triangle_text_iconrc)
else:
right_triangle_text_iconrc = _gui.GuiResource_right_triangle_text_iconrc
if _newclass:
small_up_triangle_iconrc = staticmethod(_gui.GuiResource_small_up_triangle_iconrc)
else:
small_up_triangle_iconrc = _gui.GuiResource_small_up_triangle_iconrc
if _newclass:
small_left_triangle_iconrc = staticmethod(_gui.GuiResource_small_left_triangle_iconrc)
else:
small_left_triangle_iconrc = _gui.GuiResource_small_left_triangle_iconrc
if _newclass:
small_down_triangle_iconrc = staticmethod(_gui.GuiResource_small_down_triangle_iconrc)
else:
small_down_triangle_iconrc = _gui.GuiResource_small_down_triangle_iconrc
if _newclass:
small_right_triangle_iconrc = staticmethod(_gui.GuiResource_small_right_triangle_iconrc)
else:
small_right_triangle_iconrc = _gui.GuiResource_small_right_triangle_iconrc
if _newclass:
checker_iconrc = staticmethod(_gui.GuiResource_checker_iconrc)
else:
checker_iconrc = _gui.GuiResource_checker_iconrc
if _newclass:
checker_selected_iconrc = staticmethod(_gui.GuiResource_checker_selected_iconrc)
else:
checker_selected_iconrc = _gui.GuiResource_checker_selected_iconrc
if _newclass:
radio_menu_iconrc = staticmethod(_gui.GuiResource_radio_menu_iconrc)
else:
radio_menu_iconrc = _gui.GuiResource_radio_menu_iconrc
if _newclass:
radio_menu_selected_iconrc = staticmethod(_gui.GuiResource_radio_menu_selected_iconrc)
else:
radio_menu_selected_iconrc = _gui.GuiResource_radio_menu_selected_iconrc
if _newclass:
radio_menu_empty_iconrc = staticmethod(_gui.GuiResource_radio_menu_empty_iconrc)
else:
radio_menu_empty_iconrc = _gui.GuiResource_radio_menu_empty_iconrc
if _newclass:
radio_menu_empty_selected_iconrc = staticmethod(_gui.GuiResource_radio_menu_empty_selected_iconrc)
else:
radio_menu_empty_selected_iconrc = _gui.GuiResource_radio_menu_empty_selected_iconrc
if _newclass:
horizontal_minislider_iconrc = staticmethod(_gui.GuiResource_horizontal_minislider_iconrc)
else:
horizontal_minislider_iconrc = _gui.GuiResource_horizontal_minislider_iconrc
if _newclass:
horizontal_minislider_selected_iconrc = staticmethod(_gui.GuiResource_horizontal_minislider_selected_iconrc)
else:
horizontal_minislider_selected_iconrc = _gui.GuiResource_horizontal_minislider_selected_iconrc
if _newclass:
viewport_close_iconrc = staticmethod(_gui.GuiResource_viewport_close_iconrc)
else:
viewport_close_iconrc = _gui.GuiResource_viewport_close_iconrc
if _newclass:
viewport_split_horizontal_iconrc = staticmethod(_gui.GuiResource_viewport_split_horizontal_iconrc)
else:
viewport_split_horizontal_iconrc = _gui.GuiResource_viewport_split_horizontal_iconrc
if _newclass:
viewport_split_vertical_iconrc = staticmethod(_gui.GuiResource_viewport_split_vertical_iconrc)
else:
viewport_split_vertical_iconrc = _gui.GuiResource_viewport_split_vertical_iconrc
if _newclass:
viewport_split_quad_iconrc = staticmethod(_gui.GuiResource_viewport_split_quad_iconrc)
else:
viewport_split_quad_iconrc = _gui.GuiResource_viewport_split_quad_iconrc
if _newclass:
viewport_maximize_iconrc = staticmethod(_gui.GuiResource_viewport_maximize_iconrc)
else:
viewport_maximize_iconrc = _gui.GuiResource_viewport_maximize_iconrc
if _newclass:
viewport_restore_iconrc = staticmethod(_gui.GuiResource_viewport_restore_iconrc)
else:
viewport_restore_iconrc = _gui.GuiResource_viewport_restore_iconrc
if _newclass:
left_navigator_arrow_iconrc = staticmethod(_gui.GuiResource_left_navigator_arrow_iconrc)
else:
left_navigator_arrow_iconrc = _gui.GuiResource_left_navigator_arrow_iconrc
if _newclass:
right_navigator_arrow_iconrc = staticmethod(_gui.GuiResource_right_navigator_arrow_iconrc)
else:
right_navigator_arrow_iconrc = _gui.GuiResource_right_navigator_arrow_iconrc
if _newclass:
navigator_lock_iconrc = staticmethod(_gui.GuiResource_navigator_lock_iconrc)
else:
navigator_lock_iconrc = _gui.GuiResource_navigator_lock_iconrc
if _newclass:
navigator_refresh_iconrc = staticmethod(_gui.GuiResource_navigator_refresh_iconrc)
else:
navigator_refresh_iconrc = _gui.GuiResource_navigator_refresh_iconrc
if _newclass:
clear_iconrc = staticmethod(_gui.GuiResource_clear_iconrc)
else:
clear_iconrc = _gui.GuiResource_clear_iconrc
if _newclass:
trash_iconrc = staticmethod(_gui.GuiResource_trash_iconrc)
else:
trash_iconrc = _gui.GuiResource_trash_iconrc
if _newclass:
magnifier_iconrc = staticmethod(_gui.GuiResource_magnifier_iconrc)
else:
magnifier_iconrc = _gui.GuiResource_magnifier_iconrc
if _newclass:
open_iconrc = staticmethod(_gui.GuiResource_open_iconrc)
else:
open_iconrc = _gui.GuiResource_open_iconrc
if _newclass:
save_iconrc = staticmethod(_gui.GuiResource_save_iconrc)
else:
save_iconrc = _gui.GuiResource_save_iconrc
if _newclass:
mb_info_iconrc = staticmethod(_gui.GuiResource_mb_info_iconrc)
else:
mb_info_iconrc = _gui.GuiResource_mb_info_iconrc
if _newclass:
eye_iconrc = staticmethod(_gui.GuiResource_eye_iconrc)
else:
eye_iconrc = _gui.GuiResource_eye_iconrc
if _newclass:
eye_selected_iconrc = staticmethod(_gui.GuiResource_eye_selected_iconrc)
else:
eye_selected_iconrc = _gui.GuiResource_eye_selected_iconrc
if _newclass:
class_icon_iconrc = staticmethod(_gui.GuiResource_class_icon_iconrc)
else:
class_icon_iconrc = _gui.GuiResource_class_icon_iconrc
if _newclass:
unknown_iconrc = staticmethod(_gui.GuiResource_unknown_iconrc)
else:
unknown_iconrc = _gui.GuiResource_unknown_iconrc
if _newclass:
flag_source_iconrc = staticmethod(_gui.GuiResource_flag_source_iconrc)
else:
flag_source_iconrc = _gui.GuiResource_flag_source_iconrc
if _newclass:
attribute_iconrc = staticmethod(_gui.GuiResource_attribute_iconrc)
else:
attribute_iconrc = _gui.GuiResource_attribute_iconrc
if _newclass:
context_iconrc = staticmethod(_gui.GuiResource_context_iconrc)
else:
context_iconrc = _gui.GuiResource_context_iconrc
if _newclass:
flag_localized_iconrc = staticmethod(_gui.GuiResource_flag_localized_iconrc)
else:
flag_localized_iconrc = _gui.GuiResource_flag_localized_iconrc
if _newclass:
flag_instance_iconrc = staticmethod(_gui.GuiResource_flag_instance_iconrc)
else:
flag_instance_iconrc = _gui.GuiResource_flag_instance_iconrc
if _newclass:
flag_reference_iconrc = staticmethod(_gui.GuiResource_flag_reference_iconrc)
else:
flag_reference_iconrc = _gui.GuiResource_flag_reference_iconrc
if _newclass:
class_info = staticmethod(_gui.GuiResource_class_info)
else:
class_info = _gui.GuiResource_class_info
if _newclass:
___class_destructor__ = staticmethod(_gui.GuiResource____class_destructor__)
else:
___class_destructor__ = _gui.GuiResource____class_destructor__
def get_class_info(self):
return _gui.GuiResource_get_class_info(self)
def __init__(self):
this = _gui.new_GuiResource()
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _gui.delete_GuiResource
__del__ = lambda self: None
GuiResource_swigregister = _gui.GuiResource_swigregister
GuiResource_swigregister(GuiResource)
def GuiResource_up_triangle_label():
return _gui.GuiResource_up_triangle_label()
GuiResource_up_triangle_label = _gui.GuiResource_up_triangle_label
def GuiResource_left_triangle_label():
return _gui.GuiResource_left_triangle_label()
GuiResource_left_triangle_label = _gui.GuiResource_left_triangle_label
def GuiResource_down_triangle_label():
return _gui.GuiResource_down_triangle_label()
GuiResource_down_triangle_label = _gui.GuiResource_down_triangle_label
def GuiResource_right_triangle_label():
return _gui.GuiResource_right_triangle_label()
GuiResource_right_triangle_label = _gui.GuiResource_right_triangle_label
def GuiResource_up_triangle_label_selected():
return _gui.GuiResource_up_triangle_label_selected()
GuiResource_up_triangle_label_selected = _gui.GuiResource_up_triangle_label_selected
def GuiResource_left_triangle_label_selected():
return _gui.GuiResource_left_triangle_label_selected()
GuiResource_left_triangle_label_selected = _gui.GuiResource_left_triangle_label_selected
def GuiResource_down_triangle_label_selected():
return _gui.GuiResource_down_triangle_label_selected()
GuiResource_down_triangle_label_selected = _gui.GuiResource_down_triangle_label_selected
def GuiResource_right_triangle_label_selected():
return _gui.GuiResource_right_triangle_label_selected()
GuiResource_right_triangle_label_selected = _gui.GuiResource_right_triangle_label_selected
def GuiResource_up_triangle_text():
return _gui.GuiResource_up_triangle_text()
GuiResource_up_triangle_text = _gui.GuiResource_up_triangle_text
def GuiResource_left_triangle_text():
return _gui.GuiResource_left_triangle_text()
GuiResource_left_triangle_text = _gui.GuiResource_left_triangle_text
def GuiResource_down_triangle_text():
return _gui.GuiResource_down_triangle_text()
GuiResource_down_triangle_text = _gui.GuiResource_down_triangle_text
def GuiResource_right_triangle_text():
return _gui.GuiResource_right_triangle_text()
GuiResource_right_triangle_text = _gui.GuiResource_right_triangle_text
def GuiResource_small_up_triangle():
return _gui.GuiResource_small_up_triangle()
GuiResource_small_up_triangle = _gui.GuiResource_small_up_triangle
def GuiResource_small_left_triangle():
return _gui.GuiResource_small_left_triangle()
GuiResource_small_left_triangle = _gui.GuiResource_small_left_triangle
def GuiResource_small_down_triangle():
return _gui.GuiResource_small_down_triangle()
GuiResource_small_down_triangle = _gui.GuiResource_small_down_triangle
def GuiResource_small_right_triangle():
return _gui.GuiResource_small_right_triangle()
GuiResource_small_right_triangle = _gui.GuiResource_small_right_triangle
def GuiResource_checker():
return _gui.GuiResource_checker()
GuiResource_checker = _gui.GuiResource_checker
def GuiResource_checker_selected():
return _gui.GuiResource_checker_selected()
GuiResource_checker_selected = _gui.GuiResource_checker_selected
def GuiResource_radio_menu():
return _gui.GuiResource_radio_menu()
GuiResource_radio_menu = _gui.GuiResource_radio_menu
def GuiResource_radio_menu_selected():
return _gui.GuiResource_radio_menu_selected()
GuiResource_radio_menu_selected = _gui.GuiResource_radio_menu_selected
def GuiResource_radio_menu_empty():
return _gui.GuiResource_radio_menu_empty()
GuiResource_radio_menu_empty = _gui.GuiResource_radio_menu_empty
def GuiResource_radio_menu_empty_selected():
return _gui.GuiResource_radio_menu_empty_selected()
GuiResource_radio_menu_empty_selected = _gui.GuiResource_radio_menu_empty_selected
def GuiResource_horizontal_minislider():
return _gui.GuiResource_horizontal_minislider()
GuiResource_horizontal_minislider = _gui.GuiResource_horizontal_minislider
def GuiResource_horizontal_minislider_selected():
return _gui.GuiResource_horizontal_minislider_selected()
GuiResource_horizontal_minislider_selected = _gui.GuiResource_horizontal_minislider_selected
def GuiResource_viewport_close():
return _gui.GuiResource_viewport_close()
GuiResource_viewport_close = _gui.GuiResource_viewport_close
def GuiResource_viewport_split_horizontal():
return _gui.GuiResource_viewport_split_horizontal()
GuiResource_viewport_split_horizontal = _gui.GuiResource_viewport_split_horizontal
def GuiResource_viewport_split_vertical():
return _gui.GuiResource_viewport_split_vertical()
GuiResource_viewport_split_vertical = _gui.GuiResource_viewport_split_vertical
def GuiResource_viewport_split_quad():
return _gui.GuiResource_viewport_split_quad()
GuiResource_viewport_split_quad = _gui.GuiResource_viewport_split_quad
def GuiResource_viewport_maximize():
return _gui.GuiResource_viewport_maximize()
GuiResource_viewport_maximize = _gui.GuiResource_viewport_maximize
def GuiResource_viewport_restore():
return _gui.GuiResource_viewport_restore()
GuiResource_viewport_restore = _gui.GuiResource_viewport_restore
def GuiResource_left_navigator_arrow():
return _gui.GuiResource_left_navigator_arrow()
GuiResource_left_navigator_arrow = _gui.GuiResource_left_navigator_arrow
def GuiResource_right_navigator_arrow():
return _gui.GuiResource_right_navigator_arrow()
GuiResource_right_navigator_arrow = _gui.GuiResource_right_navigator_arrow
def GuiResource_navigator_lock():
return _gui.GuiResource_navigator_lock()
GuiResource_navigator_lock = _gui.GuiResource_navigator_lock
def GuiResource_navigator_refresh():
return _gui.GuiResource_navigator_refresh()
GuiResource_navigator_refresh = _gui.GuiResource_navigator_refresh
def GuiResource_clear():
return _gui.GuiResource_clear()
GuiResource_clear = _gui.GuiResource_clear
def GuiResource_trash():
return _gui.GuiResource_trash()
GuiResource_trash = _gui.GuiResource_trash
def GuiResource_magnifier():
return _gui.GuiResource_magnifier()
GuiResource_magnifier = _gui.GuiResource_magnifier
def GuiResource_open():
return _gui.GuiResource_open()
GuiResource_open = _gui.GuiResource_open
def GuiResource_save():
return _gui.GuiResource_save()
GuiResource_save = _gui.GuiResource_save
def GuiResource_mb_info():
return _gui.GuiResource_mb_info()
GuiResource_mb_info = _gui.GuiResource_mb_info
def GuiResource_up_triangle_label_iconrc():
return _gui.GuiResource_up_triangle_label_iconrc()
GuiResource_up_triangle_label_iconrc = _gui.GuiResource_up_triangle_label_iconrc
def GuiResource_left_triangle_label_iconrc():
return _gui.GuiResource_left_triangle_label_iconrc()
GuiResource_left_triangle_label_iconrc = _gui.GuiResource_left_triangle_label_iconrc
def GuiResource_down_triangle_label_iconrc():
return _gui.GuiResource_down_triangle_label_iconrc()
GuiResource_down_triangle_label_iconrc = _gui.GuiResource_down_triangle_label_iconrc
def GuiResource_right_triangle_label_iconrc():
return _gui.GuiResource_right_triangle_label_iconrc()
GuiResource_right_triangle_label_iconrc = _gui.GuiResource_right_triangle_label_iconrc
def GuiResource_up_triangle_label_selected_iconrc():
return _gui.GuiResource_up_triangle_label_selected_iconrc()
GuiResource_up_triangle_label_selected_iconrc = _gui.GuiResource_up_triangle_label_selected_iconrc
def GuiResource_left_triangle_label_selected_iconrc():
return _gui.GuiResource_left_triangle_label_selected_iconrc()
GuiResource_left_triangle_label_selected_iconrc = _gui.GuiResource_left_triangle_label_selected_iconrc
def GuiResource_down_triangle_label_selected_iconrc():
return _gui.GuiResource_down_triangle_label_selected_iconrc()
GuiResource_down_triangle_label_selected_iconrc = _gui.GuiResource_down_triangle_label_selected_iconrc
def GuiResource_right_triangle_label_selected_iconrc():
return _gui.GuiResource_right_triangle_label_selected_iconrc()
GuiResource_right_triangle_label_selected_iconrc = _gui.GuiResource_right_triangle_label_selected_iconrc
def GuiResource_up_triangle_text_iconrc():
return _gui.GuiResource_up_triangle_text_iconrc()
GuiResource_up_triangle_text_iconrc = _gui.GuiResource_up_triangle_text_iconrc
def GuiResource_left_triangle_text_iconrc():
return _gui.GuiResource_left_triangle_text_iconrc()
GuiResource_left_triangle_text_iconrc = _gui.GuiResource_left_triangle_text_iconrc
def GuiResource_down_triangle_text_iconrc():
return _gui.GuiResource_down_triangle_text_iconrc()
GuiResource_down_triangle_text_iconrc = _gui.GuiResource_down_triangle_text_iconrc
def GuiResource_right_triangle_text_iconrc():
return _gui.GuiResource_right_triangle_text_iconrc()
GuiResource_right_triangle_text_iconrc = _gui.GuiResource_right_triangle_text_iconrc
def GuiResource_small_up_triangle_iconrc():
return _gui.GuiResource_small_up_triangle_iconrc()
GuiResource_small_up_triangle_iconrc = _gui.GuiResource_small_up_triangle_iconrc
def GuiResource_small_left_triangle_iconrc():
return _gui.GuiResource_small_left_triangle_iconrc()
GuiResource_small_left_triangle_iconrc = _gui.GuiResource_small_left_triangle_iconrc
def GuiResource_small_down_triangle_iconrc():
return _gui.GuiResource_small_down_triangle_iconrc()
GuiResource_small_down_triangle_iconrc = _gui.GuiResource_small_down_triangle_iconrc
def GuiResource_small_right_triangle_iconrc():
return _gui.GuiResource_small_right_triangle_iconrc()
GuiResource_small_right_triangle_iconrc = _gui.GuiResource_small_right_triangle_iconrc
def GuiResource_checker_iconrc():
return _gui.GuiResource_checker_iconrc()
GuiResource_checker_iconrc = _gui.GuiResource_checker_iconrc
def GuiResource_checker_selected_iconrc():
return _gui.GuiResource_checker_selected_iconrc()
GuiResource_checker_selected_iconrc = _gui.GuiResource_checker_selected_iconrc
def GuiResource_radio_menu_iconrc():
return _gui.GuiResource_radio_menu_iconrc()
GuiResource_radio_menu_iconrc = _gui.GuiResource_radio_menu_iconrc
def GuiResource_radio_menu_selected_iconrc():
return _gui.GuiResource_radio_menu_selected_iconrc()
GuiResource_radio_menu_selected_iconrc = _gui.GuiResource_radio_menu_selected_iconrc
def GuiResource_radio_menu_empty_iconrc():
return _gui.GuiResource_radio_menu_empty_iconrc()
GuiResource_radio_menu_empty_iconrc = _gui.GuiResource_radio_menu_empty_iconrc
def GuiResource_radio_menu_empty_selected_iconrc():
return _gui.GuiResource_radio_menu_empty_selected_iconrc()
GuiResource_radio_menu_empty_selected_iconrc = _gui.GuiResource_radio_menu_empty_selected_iconrc
def GuiResource_horizontal_minislider_iconrc():
return _gui.GuiResource_horizontal_minislider_iconrc()
GuiResource_horizontal_minislider_iconrc = _gui.GuiResource_horizontal_minislider_iconrc
def GuiResource_horizontal_minislider_selected_iconrc():
return _gui.GuiResource_horizontal_minislider_selected_iconrc()
GuiResource_horizontal_minislider_selected_iconrc = _gui.GuiResource_horizontal_minislider_selected_iconrc
def GuiResource_viewport_close_iconrc():
return _gui.GuiResource_viewport_close_iconrc()
GuiResource_viewport_close_iconrc = _gui.GuiResource_viewport_close_iconrc
def GuiResource_viewport_split_horizontal_iconrc():
return _gui.GuiResource_viewport_split_horizontal_iconrc()
GuiResource_viewport_split_horizontal_iconrc = _gui.GuiResource_viewport_split_horizontal_iconrc
def GuiResource_viewport_split_vertical_iconrc():
return _gui.GuiResource_viewport_split_vertical_iconrc()
GuiResource_viewport_split_vertical_iconrc = _gui.GuiResource_viewport_split_vertical_iconrc
def GuiResource_viewport_split_quad_iconrc():
return _gui.GuiResource_viewport_split_quad_iconrc()
GuiResource_viewport_split_quad_iconrc = _gui.GuiResource_viewport_split_quad_iconrc
def GuiResource_viewport_maximize_iconrc():
return _gui.GuiResource_viewport_maximize_iconrc()
GuiResource_viewport_maximize_iconrc = _gui.GuiResource_viewport_maximize_iconrc
def GuiResource_viewport_restore_iconrc():
return _gui.GuiResource_viewport_restore_iconrc()
GuiResource_viewport_restore_iconrc = _gui.GuiResource_viewport_restore_iconrc
def GuiResource_left_navigator_arrow_iconrc():
return _gui.GuiResource_left_navigator_arrow_iconrc()
GuiResource_left_navigator_arrow_iconrc = _gui.GuiResource_left_navigator_arrow_iconrc
def GuiResource_right_navigator_arrow_iconrc():
return _gui.GuiResource_right_navigator_arrow_iconrc()
GuiResource_right_navigator_arrow_iconrc = _gui.GuiResource_right_navigator_arrow_iconrc
def GuiResource_navigator_lock_iconrc():
return _gui.GuiResource_navigator_lock_iconrc()
GuiResource_navigator_lock_iconrc = _gui.GuiResource_navigator_lock_iconrc
def GuiResource_navigator_refresh_iconrc():
return _gui.GuiResource_navigator_refresh_iconrc()
GuiResource_navigator_refresh_iconrc = _gui.GuiResource_navigator_refresh_iconrc
def GuiResource_clear_iconrc():
return _gui.GuiResource_clear_iconrc()
GuiResource_clear_iconrc = _gui.GuiResource_clear_iconrc
a compatibility measure, allowing a checkpointable
object to add dependencies on variables created in a block of code which is
not aware of object-based saving (and instead uses variable names
heavily). This is how `Template` objects add dependencies on variables and
sub-`Template`s. Where possible, use `tf.make_template` directly.
Args:
template: The `Template` object to register dependencies with.
Yields:
None (when used as a context manager).
"""
name_prefix = template.variable_scope.name
def _checkpointable_custom_creator(next_creator, name, initial_value,
checkpointable_parent=None, **kwargs):
"""A variable creation hook which adds Checkpointable dependencies.
Set for example during a `Template`'s first wrapped function
execution. Ensures that (a) `template` depends on any checkpointable
objects which create variables inside this scope using their own
`capture_dependencies` scope, and (b) any variables not in a more deeply
nested scope are added as dependencies directly.
The `checkpointable_parent` argument is passed between custom creators but
ignored when the variable object itself is created. This argument indicates
(if not `None`) that a more deeply nested scope has already added the
variable as a dependency, and that parent scopes should add a dependency on
that object rather than on the variable directly.
Args:
next_creator: See `variable_scope.variable_creator_scope`; the next
creator in the chain.
name: The (full, scope-influenced) name of the variable. The `name_prefix`
itself is stripped for the purposes of object-based dependency tracking,
but scopes opened within this scope are respected.
initial_value: See `variable_scope.variable_creator_scope`. Taken
explicitly so the argument can be re-named and used with
`Checkpointable._add_variable_with_custom_getter`.
checkpointable_parent: If not None, a more deeply nested checkpointable
object and its name prefix which were passed to `capture_dependencies`
to add a dependency on (rather than depending on the variable directly).
**kwargs: Passed through to the next creator.
Returns:
The output of `next_creator`: the fetched/created variable object.
"""
def _call_next_creator_renaming_initializer(initializer, **inner_kwargs):
inner_kwargs.pop("name") # Ignored; this is the scope-stripped name which
# we don't want to propagate.
return next_creator(
initial_value=initializer,
name=name,
**inner_kwargs)
if name.startswith(name_prefix):
scope_stripped_name = name[len(name_prefix) + 1:]
if not checkpointable_parent:
return template._add_variable_with_custom_getter( # pylint: disable=protected-access
initializer=initial_value,
name=scope_stripped_name,
getter=_call_next_creator_renaming_initializer,
# Disable error checking for Checkpointable. Exceptions are instead
# raised if necessary when the object-based saver tries to
# save/restore the object.
overwrite=True,
checkpointable_parent=(template, name_prefix),
**kwargs)
else:
parent_object, parent_name_prefix = checkpointable_parent
template._track_checkpointable( # pylint: disable=protected-access
parent_object,
name=parent_name_prefix[len(name_prefix) + 1:],
overwrite=True)
return next_creator(
name=name, initial_value=initial_value,
checkpointable_parent=(template, name_prefix), **kwargs)
with variable_scope.variable_creator_scope(_checkpointable_custom_creator):
yield
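# --- Hedged illustration (not part of the original module) --------------------
# A minimal sketch of how a custom variable creator chains to the next creator,
# analogous to _checkpointable_custom_creator above. Assumes a TensorFlow 1.x
# graph-mode environment; the public tf.variable_creator_scope API mirrors the
# internal variable_scope.variable_creator_scope used in this module. Names are
# illustrative.
def _variable_creator_scope_example():
    import tensorflow as tf

    def _logging_creator(next_creator, **kwargs):
        # Inspect or rewrite kwargs, then delegate to the next creator in the chain.
        print("creating variable: %s" % kwargs.get("name"))
        return next_creator(**kwargs)

    with tf.variable_creator_scope(_logging_creator):
        return tf.get_variable("example_var", shape=[2],
                               initializer=tf.zeros_initializer())
# -------------------------------------------------------------------------------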
class _NoRestoreSaveable(saver_lib.BaseSaverBuilder.SaveableObject):
def __init__(self, tensor, name):
spec = saver_lib.BaseSaverBuilder.SaveSpec(tensor, "", name)
super(_NoRestoreSaveable, self).__init__(tensor, [spec], name)
def restore(self, restored_tensors, restored_shapes):
return control_flow_ops.no_op()
class _LoadStatus(object):
"""Abstract base for load status callbacks."""
@abc.abstractmethod
def assert_consumed(self):
"""Raises an exception unless a non-trivial restoration has completed."""
pass
@abc.abstractmethod
def run_restore_ops(self, session=None):
"""Runs restore ops from the checkpoint. Requires a valid checkpoint."""
pass
@abc.abstractmethod
def initialize_or_restore(self, session=None):
"""Runs restore ops from the checkpoint, or initializes variables."""
pass
class CheckpointLoadStatus(_LoadStatus):
"""Checks the status of checkpoint loading and manages restore ops.
Returned from `Saver.restore`. Since `restore` may defer the loading of values
in the checkpoint which don't yet have corresponding Python objects,
`CheckpointLoadStatus` provides a callback to verify that checkpoint loading
is complete (`assert_consumed`).
When graph building, `restore` does not run restore ops itself since their
creation may be deferred. The `run_restore_ops` method must be called once all
Python objects with values to restore have been created and added to the
dependency graph (this does not necessarily have to be the whole checkpoint;
calling `run_restore_ops` while `assert_consumed` fails is supported and will
partially restore the checkpoint).
See `Saver.restore` for usage examples.
"""
def __init__(self, checkpoint, feed_dict, root_checkpointable):
self._checkpoint = checkpoint
self._feed_dict = feed_dict
self._root_checkpointable = root_checkpointable
def assert_consumed(self):
"""Asserts that all objects in the checkpoint have been created/matched.
Returns:
`self` for chaining.
Raises:
AssertionError: If there are any Python objects in the dependency graph
which have not been restored from this checkpoint or a later `restore`,
or if there are any checkpointed values which have not been matched to
Python objects.
"""
for node_id, node in enumerate(self._checkpoint.object_graph_proto.nodes):
checkpointable = self._checkpoint.object_by_proto_id.get(node_id, None)
if checkpointable is None:
raise AssertionError("Unresolved object in checkpoint: %s" % (node,))
if checkpointable._update_uid < self._checkpoint.restore_uid: # pylint: disable=protected-access
raise AssertionError(
"Object not assigned a value from checkpoint: %s" % (node,))
if self._checkpoint.slot_restorations:
# Sanity check; this collection should be clear if everything has been
# restored.
raise AssertionError("Unresolved slot restorations: %s" % (
self._checkpoint.slot_restorations,))
if self._checkpoint.unused_attributes:
raise AssertionError(
("Unused attributes in these objects (the attributes exist in the "
"checkpoint but not in the objects): %s") % (
self._checkpoint.unused_attributes.items(),))
for checkpointable_object in list_objects(self._root_checkpointable):
self._checkpoint.all_python_objects.add(checkpointable_object)
unused_python_objects = (
set(self._checkpoint.all_python_objects)
- set(self._checkpoint.object_by_proto_id.values()))
if unused_python_objects:
raise AssertionError(
("Some Python objects were not bound to checkpointed values, likely "
"due to changes in the Python program: %s")
% (unused_python_objects,))
return self
def run_restore_ops(self, session=None):
"""Run operations to restore objects in the dependency graph."""
if context.executing_eagerly():
return # Run eagerly
if session is None:
session = ops.get_default_session()
session.run(self._checkpoint.restore_ops, feed_dict=self._feed_dict)
def initialize_or_restore(self, session=None):
"""Run operations to initialize or restore objects in the dependency graph.
Any objects in the dependency graph which have initializers but are not in
the checkpoint will have those initializers run, unless those variables are
being restored by a later call to `tf.train.Checkpoint.restore()`.
This method has a sibling in `InitializationOnlyStatus` which instead
initializes variables. That type is returned if no checkpoint is specified
in `Saver.restore`.
Args:
session: The session to run init/restore ops in. If `None`, uses the
default session.
"""
if context.executing_eagerly():
return # Initialization and restoration ops are run eagerly
if session is None:
session = ops.get_default_session()
all_objects = list_objects(self._root_checkpointable)
already_initialized_objects = set(
self._checkpoint.object_by_proto_id.values())
initializers_for_non_restored_variables = [
c.initializer for c in all_objects
if hasattr(c, "initializer")
and c not in already_initialized_objects
and (getattr(c, "_update_uid", self._checkpoint.restore_uid - 1)
< self._checkpoint.restore_uid)]
self.run_restore_ops(session=session)
session.run(initializers_for_non_restored_variables)
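# --- Hedged usage sketch (not part of this module) -----------------------------
# The graph-mode flow described in the CheckpointLoadStatus docstring:
# restore() defers loading, and the returned status runs the restore ops (or
# initializers) once a session is available. Assumes a TF 1.x graph-mode
# environment; the variable name and checkpoint path are illustrative.
def _checkpoint_load_status_example(save_path):
    import tensorflow as tf
    v = tf.get_variable("example_v", initializer=0.0)
    checkpoint = tf.train.Checkpoint(v=v)
    status = checkpoint.restore(save_path)  # CheckpointLoadStatus for a valid save_path
    with tf.Session() as session:
        status.initialize_or_restore(session)  # restore v, initialize anything not in the checkpoint
        # status.assert_consumed() can be called once all restorable objects exist.
        return session.run(v)
# -------------------------------------------------------------------------------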
class InitializationOnlyStatus(_LoadStatus):
"""Returned from `Saver.restore` when no checkpoint has been specified.
Objects of this type have the same `assert_consumed` method as
`CheckpointLoadStatus`, but it always fails. However,
`initialize_or_restore` works on objects of both types, and will
initialize variables in `InitializationOnlyStatus` objects or restore them
otherwise.
"""
def __init__(self, root_checkpointable, restore_uid):
self._restore_uid = restore_uid
self._root_checkpointable = root_checkpointable
def assert_consumed(self):
"""Assertion for consistency with `CheckpointLoadStatus`. Always fails."""
raise AssertionError(
"No checkpoint specified (save_path=None); nothing is being restored.")
def run_restore_ops(self, session=None):
"""For consistency with `CheckpointLoadStatus`.
Use `initialize_or_restore` for initializing if no checkpoint was passed
to `Saver.restore` and restoring otherwise.
Args:
session: Not used.
"""
raise AssertionError(
"No checkpoint specified, so no restore ops are available "
"(save_path=None to Saver.restore).")
def initialize_or_restore(self, session=None):
"""Runs initialization ops for variables.
Objects which would be saved by `Saver.save` will be initialized, unless
those variables are being restored by a later call to
`tf.train.Checkpoint.restore()`.
This method does nothing when executing eagerly (initializers get run
eagerly).
Args:
session: The session to run initialization ops in. If `None`, uses the
default session.
"""
if context.executing_eagerly():
return # run eagerly
if session is None:
session = ops.get_default_session()
checkpointable_objects = list_objects(self._root_checkpointable)
initializers = [
c.initializer for c in checkpointable_objects
if hasattr(c, "initializer") and c.initializer is not None
and (getattr(c, "_update_uid", self._restore_uid - 1)
< self._restore_uid)]
session.run(initializers)
_DEPRECATED_RESTORE_INSTRUCTIONS = (
"Restoring a name-based tf.train.Saver checkpoint using the object-based "
"restore API. This mode uses global names to match variables, and so is "
"somewhat fragile. It also adds new restore ops to the graph each time it "
"is called when graph building. Prefer re-encoding training checkpoints in "
"the object-based format: run save() on the object-based saver (the same "
"one this message is coming from) and use that checkpoint in the future.")
@deprecation.deprecated(
date=None, instructions=_DEPRECATED_RESTORE_INSTRUCTIONS)
class NameBasedSaverStatus(_LoadStatus):
"""Status for loading a name-based training checkpoint."""
def __init__(self, checkpoint, root_checkpointable):
self._checkpoint = checkpoint
self._root_checkpointable = root_checkpointable
def assert_consumed(self):
"""Raises an exception if any variables/objects are unmatched."""
unused_attributes = dict(self._checkpoint.unused_attributes)
if unused_attributes:
raise AssertionError(
"Some objects had attributes which were not restored: %s"
% (unused_attributes,))
for checkpointable in list_objects(self._root_checkpointable):
# pylint: disable=protected-access
checkpointable._maybe_initialize_checkpointable()
if checkpointable._update_uid < self._checkpoint.restore_uid:
raise AssertionError("Object not restored: %s" % (checkpointable,))
# pylint: enable=protected-access
def _gather_saveable_objects(self):
"""Walk the object | |
"readonly": False},
{"prefix": "TEC", "id": 6004, "name": "ObjMeasADCCalibGain", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": False},
{"prefix": "TEC", "id": 6005, "name": "ObjMeasSensorTypeSelection", "type": int, "mepar_type": MeParType.INT32, "readonly": False},
{"prefix": "TEC", "id": 6010, "name": "SinMeasADCRv", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": False},
{"prefix": "TEC", "id": 6013, "name": "SinMeasADCVps", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": False},
{"prefix": "TEC", "id": 6011, "name": "SinMeasADCCalibOffset", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": False},
{"prefix": "TEC", "id": 6012, "name": "SinMeasADCCalibGain", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": False},
{"prefix": "TEC", "id": 6020, "name": "DisplayType", "type": int, "mepar_type": MeParType.INT32, "readonly": False},
{"prefix": "TEC", "id": 6023, "name": "AlternativeMode", "type": int, "mepar_type": MeParType.INT32, "readonly": False},
{"prefix": "TEC", "id": 6024, "name": "DisplayLineDefText", "type": str, "mepar_type": MeParType.LATIN1, "readonly": False},
{"prefix": "TEC", "id": 6025, "name": "DisplayLineAltText", "type": str, "mepar_type": MeParType.LATIN1, "readonly": False},
{"prefix": "TEC", "id": 6026, "name": "DisplayLineAltMode", "type": str, "mepar_type": MeParType.LATIN1, "readonly": False},
{"prefix": "TEC", "id": 6100, "name": "PbcFunction", "type": int, "mepar_type": MeParType.INT32, "readonly": False},
{"prefix": "TEC", "id": 6110, "name": "ChangeButtonLowTemperature", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": False},
{"prefix": "TEC", "id": 6111, "name": "ChangeButtonHighTemperature", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": False},
{"prefix": "TEC", "id": 6112, "name": "ChangeButtonStepSize", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": False},
{"prefix": "TEC", "id": 6200, "name": "FanControlEnable", "type": int, "mepar_type": MeParType.INT32, "readonly": False},
{"prefix": "TEC", "id": 6210, "name": "FanActualTempSource", "type": int, "mepar_type": MeParType.INT32, "readonly": False},
{"prefix": "TEC", "id": 6211, "name": "FanTargetTemp", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": False},
{"prefix": "TEC", "id": 6212, "name": "FanTempKp", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": False},
{"prefix": "TEC", "id": 6213, "name": "FanTempTi", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": False},
{"prefix": "TEC", "id": 6214, "name": "FanTempTd", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": False},
{"prefix": "TEC", "id": 6220, "name": "FanSpeedMin", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": False},
{"prefix": "TEC", "id": 6221, "name": "FanSpeedMax", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": False},
{"prefix": "TEC", "id": 6222, "name": "FanSpeedKp", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": False},
{"prefix": "TEC", "id": 6223, "name": "FanSpeedTi", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": False},
{"prefix": "TEC", "id": 6224, "name": "FanSpeedTd", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": False},
{"prefix": "TEC", "id": 6225, "name": "FanSpeedBypass", "type": int, "mepar_type": MeParType.INT32, "readonly": False},
{"prefix": "TEC", "id": 6230, "name": "PwmFrequency", "type": int, "mepar_type": MeParType.INT32, "readonly": False},
{"prefix": "TEC", "id": 6300, "name": "MiscActObjectTempSource", "type": int, "mepar_type": MeParType.INT32, "readonly": False},
{"prefix": "TEC", "id": 6310, "name": "MiscDelayTillReset", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": False},
{"prefix": "TEC", "id": 6320, "name": "MiscError108Delay", "type": int, "mepar_type": MeParType.INT32, "readonly": False},
{"prefix": "TEC", "id": 50000, "name": "LiveEnable", "type": int, "mepar_type": MeParType.INT32, "readonly": False},
{"prefix": "TEC", "id": 50001, "name": "LiveSetCurrent", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": False},
{"prefix": "TEC", "id": 50002, "name": "LiveSetVoltage", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": False},
{"prefix": "TEC", "id": 50010, "name": "SineRampStartPoint", "type": int, "mepar_type": MeParType.INT32, "readonly": False},
{"prefix": "TEC", "id": 50011, "name": "ObjectTargetTempSourceSelection", "type": int, "mepar_type": MeParType.INT32, "readonly": False},
{"prefix": "TEC", "id": 50012, "name": "ObjectTargetTemperature", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": False},
{"prefix": "TEC", "id": 51000, "name": "AtmAutoTuningStart", "type": int, "mepar_type": MeParType.INT32, "readonly": False},
{"prefix": "TEC", "id": 51001, "name": "AtmAutoTuningCancel", "type": int, "mepar_type": MeParType.INT32, "readonly": False},
{"prefix": "TEC", "id": 51002, "name": "AtmThermalModelSpeed", "type": int, "mepar_type": MeParType.INT32, "readonly": False},
{"prefix": "TEC", "id": 51010, "name": "AtmTuningParameter2A", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": True},
{"prefix": "TEC", "id": 51011, "name": "AtmTuningParameter2D", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": True},
{"prefix": "TEC", "id": 51012, "name": "AtmTuningParameterKu", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": True},
{"prefix": "TEC", "id": 51013, "name": "AtmTuningParameterTu", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": True},
{"prefix": "TEC", "id": 51014, "name": "AtmPIDParameterKp", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": True},
{"prefix": "TEC", "id": 51015, "name": "AtmPIDParameterTi", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": True},
{"prefix": "TEC", "id": 51016, "name": "AtmPIDParameterTd", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": True},
{"prefix": "TEC", "id": 51022, "name": "AtmSlowPIParameterKp", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": True},
{"prefix": "TEC", "id": 51023, "name": "AtmSlowPIParameterTi", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": True},
{"prefix": "TEC", "id": 51024, "name": "AtmPIDDPartDamping", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": True},
{"prefix": "TEC", "id": 51017, "name": "AtmCoarseTempRamp", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": True},
{"prefix": "TEC", "id": 51018, "name": "AtmProximityWidth", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": True},
{"prefix": "TEC", "id": 51020, "name": "AtmTuningStatus", "type": int, "mepar_type": MeParType.INT32, "readonly": True},
{"prefix": "TEC", "id": 51021, "name": "AtmTuningProgress", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": True},
{"prefix": "TEC", "id": 52000, "name": "LutTableStart", "type": int, "mepar_type": MeParType.INT32, "readonly": False},
{"prefix": "TEC", "id": 52001, "name": "LutTableStop", "type": int, "mepar_type": MeParType.INT32, "readonly": False},
{"prefix": "TEC", "id": 52002, "name": "LutTableStatus", "type": int, "mepar_type": MeParType.INT32, "readonly": False},
{"prefix": "TEC", "id": 52003, "name": "LutCurrentTableLine", "type": int, "mepar_type": MeParType.INT32, "readonly": False},
{"prefix": "TEC", "id": 52010, "name": "LutTableIDSelection", "type": int, "mepar_type": MeParType.INT32, "readonly": False},
{"prefix": "TEC", "id": 52012, "name": "LutNrOfRepetitions", "type": int, "mepar_type": MeParType.INT32, "readonly": False},
{"prefix": "TEC", "id": 52100, "name": "PbcEnableFunction", "type": int, "mepar_type": MeParType.INT32, "readonly": False},
{"prefix": "TEC", "id": 52101, "name": "PbcSetOutputToPushPull", "type": int, "mepar_type": MeParType.INT32, "readonly": False},
{"prefix": "TEC", "id": 52102, "name": "PbcSetOutputStates", "type": int, "mepar_type": MeParType.INT32, "readonly": False},
{"prefix": "TEC", "id": 52103, "name": "PbcReadInputStates", "type": int, "mepar_type": MeParType.INT32, "readonly": False},
{"prefix": "TEC", "id": 52200, "name": "ExternalActualObjectTemperature", "type": float, "mepar_type": MeParType.FLOAT32, "readonly": False}
]
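# --- Hedged illustration (not part of the original driver) ---------------------
# One way a parameter table like the one above is typically consumed: look up a
# descriptor by name and coerce a raw value with its declared Python type. The
# binding of the table to a module-level name lies outside this excerpt, so it
# is passed in explicitly here; the helper names are illustrative only.
def find_tec_parameter(name, table):
    for entry in table:
        if entry["name"] == name:
            return entry
    raise KeyError("Unknown TEC parameter: %s" % name)

def coerce_tec_value(name, raw_value, table):
    entry = find_tec_parameter(name, table)
    return entry["type"](raw_value)  # e.g. float("25.0") -> 25.0 for ObjectTargetTemperature
# -------------------------------------------------------------------------------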
TEC_ERRORS = {
0: "No Error",
1: "Processor Error:\nMCU system malfunction",
2: "Processor Error:\nMCU system malfunction",
3: "Processor Error:\nMCU system malfunction",
4: "Processor Error:\nMCU system malfunction",
5: "Processor Error:\nMCU system malfunction",
6: "Processor Error:\nMCU system malfunction",
7: "Processor Error:\nMCU system malfunction",
8: "Processor Error:\nMCU system malfunction",
9: "Processor Error:\nMCU system malfunction",
10: "Processor Error:\nMCU system malfunction",
11: "LTR-1200 Emergency Stop",
12: "LTR-1200 HMI Free Timeout",
20: "Parameter Error:\nInternal parameter system malfunction",
21: "Parameter Error:\nInternal parameter system malfunction",
22: "Parameter Error:\nParameter set corrupt (Configuration flash empty or defect)\n\nLoad .ini file saved prior to FW update, or Default.ini",
23: "Parameter Error:\nParameter set incompatible with current firmware version\n\nLoad .ini file saved prior to FW update, or Default.ini",
24: "Parameter Error:\nFirmware does not recognize valid device",
25: "Parameter Error:\nInternal parameter system malfunction (Access to a non-existing instance)",
26: "Parameter Error:\nInternal limit system malfunction",
27: "Parameter Error:\nParameter write or read wrong datatype function used",
28: "Parameter Error:\nParameter write value out of range",
29: "Parameter Error:\nParameter save to flash called from interrupt.",
30: "Power Supply Error:\nInput voltage net < Hard Limit",
31: "Power Supply Error:\nInput voltage net > Hard Limit",
32: "Power Supply Error:\nInternal Medium Voltage power net < Hard Limit",
33: "Power Supply Error:\nInternal Medium Voltage power net > Hard Limit",
36: "Power Supply Error:\nInternal 3.3V power net < Hard Limit",
37: "Power Supply Error:\nInternal 3.3V power net > Hard Limit",
50: "Flash Error:\nOn-board flash failure\nWrite Timeout",
51: "Flash Error:\nOn-board flash failure\nErase Timeout",
52: "Flash Error:\nOn-board flash failure\nInvalid Address",
53: "UART Error:\nSend buffer overflow error",
60: "Temperature Error:\nDevice running too hot",
61: "External Hardware Error:\nCommunication error with I/O hardware during factory test",
100: "TEC Power Output Error:\nOvercurrent (positive) at OUT+\n[Channel CHx according to instance]\n\nCurrent > 'Current Error Threshold' [user set]",
101: "TEC Power Output Error:\nOvercurrent (negative) at OUT+\n[Channel CHx according to instance]\n\n|Current| > 'Current Error Threshold' [user set]",
102: "TEC Power Output Error:\nOvercurrent (positive) at OUT-\n[Channel CHx according to instance]\n\nCurrent > 'Current Error Threshold' [user set]",
103: "TEC Power Output Error:\nOvercurrent (negative) at OUT-\n[Channel CHx according to instance]\n\n|Current| > 'Current Error Threshold' [user set]",
104: "TEC Power Output Error:\nOvervoltage at OUT+\n[Channel CHx according to instance]\n\nVoltage > 'Voltage Error Threshold' [user set]",
105: "TEC Power Output Error:\nOvervoltage at OUT-\n[Channel CHx according to instance]\n\nVoltage > 'Voltage Error Threshold' [user set]",
106: "TEC Power Output Error:\nResidual current too high. The Current difference between OUT+ and OUT- is too big.\nLAbs(I+ - I-) > Imax *0.1\n\nCheck output cables for insulation defects",
107: "TEC Power Output Error:\nOverall current monitoring\nFast switch off (reacts within 10 us)",
108: "TEC Power Output Error:\nOutput Stage is in saturation for more than 1ms (by default)\n\nCheck input current is sufficient and Vout not set too close to Vin. Try to reduce the 'Current Limitation' or 'Voltage Limitation' in the Operation tab!",
109: "TEC Power Output Error:\nCurrents through Drivers OUT+ and OUT- too unequal\n[Channel CHx according to instance]\n\nLeak current at output, faulty current detection",
110: "TEC Power Output Error:\nAllowed total output power reached\n\nReduce Output Power",
111: "TEC Power Output Error:\nThe connected load has a too low resistance in compare to the input voltage.\n\nReduce the output current or input voltage.\nExample calculation for I out > (IMAX * 2/3):\nMinR = (VIN * 13%) / (IMAX * 2/3)",
120: "Current Measurement Error:\nOffset during initialization of OUT+ current monitor too high\n[Channel CHx according to instance]",
121: "Current Measurement Error:\nOffset during initialization of OUT+ current monitor too low\n[Channel CHx according to instance]",
122: "Current Measurement | |
cmd, *args):
"""
Builder for simple commands
@param cmd The command to build
@param args Unused arguments
@retval Returns string ready for sending to instrument
"""
return "%s%s" % (cmd, self._newline)
def _build_keypress_command(self, cmd, *args):
"""
Builder for simple, non-EOLN-terminated commands
@param cmd The command to build
@param args Unused arguments
@retval Returns string ready for sending to instrument
"""
return "%s" % cmd
def _build_multi_keypress_command(self, cmd, *args):
"""
Builder for simple, non-EOLN-terminated commands where the keypress is repeated six times
@param cmd The command to build
@param args Unused arguments
@retval Returns string ready for sending to instrument
"""
return "%s%s%s%s%s%s" % (cmd, cmd, cmd, cmd, cmd, cmd)
########################################################################
# Static helpers to format set commands.
########################################################################
@staticmethod
def _true_false_to_string(v):
"""
Write a boolean value to string formatted for "generic" set operations.
Subclasses should overload this as needed for instrument-specific
formatting.
@param v a boolean value.
@retval The boolean rendered as a Python-style string ("True"/"False") for set operations.
@throws InstrumentParameterException if value not a bool.
"""
if not isinstance(v, bool):
raise InstrumentParameterException('Value %s is not a bool.' % str(v))
return str(v)
@staticmethod
def _int_to_string(v):
"""
Write an int value to string formatted for "generic" set operations.
Subclasses should overload this as needed for instrument-specific
formatting.
@param v An int val.
@retval an int string formatted for generic set operations.
@throws InstrumentParameterException if value not an int.
"""
if not isinstance(v, int):
raise InstrumentParameterException('Value %s is not an int.' % str(v))
else:
return '%i' % v
@staticmethod
def _float_to_string(v):
"""
Write a float value to string formatted for "generic" set operations.
Subclasses should overload this as needed for instrument-specific
formatting.
@param v A float val.
@retval a float string formatted for "generic" set operations.
@throws InstrumentParameterException if value is not a float.
"""
if not isinstance(v, float):
raise InstrumentParameterException('Value %s is not a float.' % v)
else:
return '%e' % v
def _get_param_list(self, *args, **kwargs):
"""
Return a list of parameters based on the list passed in. If the
list contains an ALL-parameters request, the returned list will contain
all known parameters; otherwise the original list is returned. The list
is also checked for unknown parameters.
@param args[0] list of parameters to inspect
@return: list of parameters.
@raises: InstrumentParameterException when the wrong param type is passed
in or an unknown parameter is in the list
"""
try:
param_list = args[0]
except IndexError:
raise InstrumentParameterException('Parameter required, none specified')
if isinstance(param_list, str):
param_list = [param_list]
elif not isinstance(param_list, (list, tuple)):
raise InstrumentParameterException("Expected a list, tuple or a string")
# Verify all parameters are known parameters
bad_params = []
known_params = self._param_dict.get_keys() + [DriverParameter.ALL]
for param in param_list:
if param not in known_params:
bad_params.append(param)
if len(bad_params):
raise InstrumentParameterException("Unknown parameters: %s" % bad_params)
if DriverParameter.ALL in param_list:
return self._param_dict.get_keys()
else:
return param_list
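# --- Hedged illustration (not part of the original framework) ------------------
# A sketch of how a concrete protocol might overload one of the "generic"
# set-format helpers above, for an instrument that expects fixed-point rather
# than exponential notation. The class name and precision are illustrative.
class _FixedPointFormatProtocolExample(InstrumentProtocol):
    @staticmethod
    def _float_to_string(v):
        if not isinstance(v, float):
            raise InstrumentParameterException('Value %s is not a float.' % v)
        return '%.3f' % v  # e.g. 21.5 -> '21.500'
# -------------------------------------------------------------------------------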
class CommandResponseInstrumentProtocol(InstrumentProtocol):
"""
Base class for text-based command-response instruments.
"""
def __init__(self, prompts, newline, driver_event):
"""
Constructor.
@param prompts Enum class containing possible device prompts used for
command response logic.
@param newline The device newline.
@param driver_event The callback for asynchronous driver events.
"""
# Construct superclass.
InstrumentProtocol.__init__(self, driver_event)
# The end of line delimiter.
self._newline = newline
# Class of prompts used by device.
self._prompts = prompts
# Line buffer for input from device.
self._linebuf = ''
# Short buffer to look for prompts from device in command-response
# mode.
self._promptbuf = ''
# Lines of data awaiting further processing.
self._datalines = []
# Handlers to build commands.
self._build_handlers = {}
# Handlers to parse responses.
self._response_handlers = {}
self._last_data_timestamp = 0
def _get_prompts(self):
"""
Return the list of prompts ordered from longest to shortest. The
assumption is that a longer prompt is more specific.
@return: list of prompts ordered by length.
"""
if isinstance(self._prompts, list):
prompts = self._prompts
else:
prompts = self._prompts.list()
prompts.sort(key=len, reverse=True)
return prompts
def _get_response(self, timeout=10, expected_prompt=None, response_regex=None):
"""
Get a response from the instrument, but be a bit loose with what we
accept: leave some room for whitespace around prompts rather than trying
to match it exactly, in case we are off by a little whitespace or not
quite at the end of a line.
@todo Consider cases with no prompt
@param timeout The timeout in seconds
@param expected_prompt Only consider the specific expected prompt as
presented by this string
@param response_regex Look for a response value that matches the
supplied compiled regex pattern; the matching groups are returned and no
prompt is looked for. Cannot be used together with expected_prompt. If a
regex is supplied, the internal prompt list is ignored.
@retval Regex search result tuple (as MatchObject.groups() would return)
if a response_regex is supplied, or a tuple of (prompt, response) if a
prompt is looked for.
@throw InstrumentProtocolException if both regex and expected prompt are
passed in or regex is not a compiled pattern.
@throw InstrumentTimeoutException on timeout
"""
# Grab time for timeout and wait for prompt.
starttime = time.time()
if response_regex and not isinstance(response_regex, RE_PATTERN):
raise InstrumentProtocolException('Response regex is not a compiled pattern!')
if expected_prompt and response_regex:
raise InstrumentProtocolException('Cannot supply both regex and expected prompt!')
if expected_prompt is None:
prompt_list = self._get_prompts()
else:
if isinstance(expected_prompt, str):
prompt_list = [expected_prompt]
else:
prompt_list = expected_prompt
if response_regex is None:
pattern = None
else:
pattern = response_regex.pattern
log.debug('_get_response: timeout=%s, prompt_list=%s, expected_prompt=%r, response_regex=%r, promptbuf=%r',
timeout, prompt_list, expected_prompt, pattern, self._promptbuf)
while True:
if response_regex:
match = response_regex.search(self._linebuf)
if match:
return match.groups()
else:
for item in prompt_list:
index = self._promptbuf.find(item)
if index >= 0:
result = self._promptbuf[0:index + len(item)]
return item, result
time.sleep(.1)
if time.time() > starttime + timeout:
raise InstrumentTimeoutException("in InstrumentProtocol._get_response()")
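# Illustrative sketch (not part of the original driver code): typical ways
# to call _get_response(). The prompt string and regex are assumptions for
# the example only.
#
#   prompt, response = self._get_response(timeout=10, expected_prompt='S>')
#   groups = self._get_response(timeout=10,
#                               response_regex=re.compile(r'SBE (\d+)'))
#
# Supplying both keywords, or an uncompiled pattern, raises
# InstrumentProtocolException; running past the timeout raises
# InstrumentTimeoutException.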
def _get_raw_response(self, timeout=10, expected_prompt=None):
"""
Get a response from the instrument, but don't trim whitespace. Used
when the whitespace itself is what we are looking for.
@param timeout The timeout in seconds
@param expected_prompt Only consider the specific expected prompt as
presented by this string
@throw InstrumentTimeoutException on timeout
"""
# Grab time for timeout and wait for prompt.
strip_chars = "\t "
starttime = time.time()
if expected_prompt is None:
prompt_list = self._get_prompts()
else:
if isinstance(expected_prompt, str):
prompt_list = [expected_prompt]
else:
prompt_list = expected_prompt
while True:
for item in prompt_list:
if self._promptbuf.rstrip(strip_chars).endswith(item.rstrip(strip_chars)):
return item, self._linebuf
else:
time.sleep(.1)
if time.time() > starttime + timeout:
raise InstrumentTimeoutException("in InstrumentProtocol._get_raw_response()")
def _do_cmd_resp(self, cmd, *args, **kwargs):
"""
Perform a command-response on the device.
@param cmd The command to execute.
@param args positional arguments to pass to the build handler.
@param write_delay kwarg for the amount of delay in seconds to pause
between each character. If none supplied, the DEFAULT_WRITE_DELAY
value will be used.
@param timeout optional wakeup and command timeout via kwargs.
@param expected_prompt kwarg offering a specific prompt to look for
other than the ones in the protocol class itself.
@param response_regex kwarg with a compiled regex for the response to
match. Groups that match will be returned as a string.
Cannot be supplied with expected_prompt. May be helpful for
instruments that do not have a prompt.
@retval resp_result The (possibly parsed) response result including the
first instance of the prompt matched. If a regex was used, the prompt
will be an empty string and the response will be the joined collection
of matched groups.
@raises InstrumentTimeoutException if the response did not occur in time.
@raises InstrumentProtocolException if command could not be built or if response
was not recognized.
"""
# Get timeout and initialize response.
timeout = kwargs.get('timeout', DEFAULT_CMD_TIMEOUT)
expected_prompt = kwargs.get('expected_prompt', None)
response_regex = kwargs.get('response_regex', None)
write_delay = kwargs.get('write_delay', DEFAULT_WRITE_DELAY)
if response_regex and not isinstance(response_regex, RE_PATTERN):
raise InstrumentProtocolException('Response regex is not a compiled pattern!')
if expected_prompt and response_regex:
raise InstrumentProtocolException('Cannot supply both regex and expected prompt!')
# Get the build handler.
build_handler = self._build_handlers.get(cmd, None)
if not build_handler:
raise InstrumentProtocolException('Cannot build command: %s' % cmd)
if not callable(build_handler):
raise InstrumentProtocolException('Build handler is not callable')
cmd_line = build_handler(cmd, *args)  # build the command string via the registered handler
if tableRowValues is None:
self.tableRowValues = []
else:
self.tableRowValues = tableRowValues
def factory(*args_, **kwargs_):
if TableType.subclass:
return TableType.subclass(*args_, **kwargs_)
else:
return TableType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_tableColumnTypes(self): return self.tableColumnTypes
def set_tableColumnTypes(self, tableColumnTypes): self.tableColumnTypes = tableColumnTypes
def get_tableRowValues(self): return self.tableRowValues
def set_tableRowValues(self, tableRowValues): self.tableRowValues = tableRowValues
def add_tableRowValues(self, value): self.tableRowValues.append(value)
def insert_tableRowValues_at(self, index, value): self.tableRowValues.insert(index, value)
def replace_tableRowValues_at(self, index, value): self.tableRowValues[index] = value
def validate_listOfStrings(self, value):
# Validate type listOfStrings, a restriction on xsd:string.
pass
def hasContent_(self):
if (
self.tableColumnTypes is not None or
self.tableRowValues
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='TableType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='TableType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='TableType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TableType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='TableType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.tableColumnTypes is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%stableColumnTypes>%s</%stableColumnTypes>%s' % (namespace_, self.gds_format_string(quote_xml(' '.join(self.tableColumnTypes)), input_name='tableColumnTypes'), namespace_, eol_))
for tableRowValues_ in self.tableRowValues:
showIndent(outfile, level, pretty_print)
outfile.write('<%stableRowValues>%s</%stableRowValues>%s' % (namespace_, self.gds_format_string(quote_xml(tableRowValues_), input_name='tableRowValues'), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'tableColumnTypes':
tableColumnTypes_ = child_.text
self.tableColumnTypes = tableColumnTypes_
# validate type listOfStrings
self.validate_listOfStrings(self.tableColumnTypes)
elif nodeName_ == 'tableRowValues':
tableRowValues_ = child_.text
self.tableRowValues.append(tableRowValues_)
# validate type listOfStrings
self.validate_listOfStrings(self.tableRowValues[-1])
# end class TableType
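# Illustrative sketch (not part of the generated module): a round trip with
# TableType. The column types and row text are invented for the example.
#
#   import sys
#   t = TableType(tableColumnTypes=['string', 'float'])
#   t.add_tableRowValues('sample_A 1.0')
#   t.export(sys.stdout, 0, name_='table')
#
# This writes a <table> element containing one <tableColumnTypes> child and
# one <tableRowValues> child.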
class CVListType(GeneratedsSuper):
"""The list of controlled vocabularies used in the file."""
subclass = None
superclass = None
def __init__(self, cv=None):
self.original_tagname_ = None
if cv is None:
self.cv = []
else:
self.cv = cv
def factory(*args_, **kwargs_):
if CVListType.subclass:
return CVListType.subclass(*args_, **kwargs_)
else:
return CVListType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_cv(self): return self.cv
def set_cv(self, cv): self.cv = cv
def add_cv(self, value): self.cv.append(value)
def insert_cv_at(self, index, value): self.cv.insert(index, value)
def replace_cv_at(self, index, value): self.cv[index] = value
def hasContent_(self):
if (
self.cv
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='CVListType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='CVListType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='CVListType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='CVListType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='CVListType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for cv_ in self.cv:
cv_.export(outfile, level, namespace_, name_='cv', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'cv':
obj_ = CVType.factory()
obj_.build(child_)
self.cv.append(obj_)
obj_.original_tagname_ = 'cv'
# end class CVListType
class CVType(GeneratedsSuper):
"""A source controlled vocabulary from which cvParams will be
obtained.The full name of the CV.The version of the CV.The URI
of the source CV.The unique identifier of this cv within the
document to be referenced by cvParam elements."""
subclass = None
superclass = None
def __init__(self, fullName=None, version=None, uri=None, ID=None):
self.original_tagname_ = None
self.fullName = _cast(None, fullName)
self.version = _cast(None, version)
self.uri = _cast(None, uri)
self.ID = _cast(None, ID)
def factory(*args_, **kwargs_):
if CVType.subclass:
return CVType.subclass(*args_, **kwargs_)
else:
return CVType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_fullName(self): return self.fullName
def set_fullName(self, fullName): self.fullName = fullName
def get_version(self): return self.version
def set_version(self, version): self.version = version
def get_uri(self): return self.uri
def set_uri(self, uri): self.uri = uri
def get_ID(self): return self.ID
def set_ID(self, ID): self.ID = ID
def hasContent_(self):
# CVType defines only attributes, so it never carries element content.
return False
def export(self, outfile, level, namespace_='', name_='CVType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='CVType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='CVType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='CVType'):
if self.fullName is not None and 'fullName' not in already_processed:
already_processed.add('fullName')
outfile.write(' fullName=%s' % (self.gds_format_string(quote_attrib(self.fullName), input_name='fullName'), ))
if self.version is not None and 'version' not in already_processed:
already_processed.add('version')
outfile.write(' version=%s' % (self.gds_format_string(quote_attrib(self.version), input_name='version'), ))
if self.uri is not None and 'uri' not in already_processed:
already_processed.add('uri')
outfile.write(' uri=%s' % (self.gds_format_string(quote_attrib(self.uri), input_name='uri'), ))
if self.ID is not None and 'ID' not in already_processed:
already_processed.add('ID')
outfile.write(' ID=%s' % (self.gds_format_string(quote_attrib(self.ID), input_name='ID'), ))
def exportChildren(self, outfile, level, namespace_='', name_='CVType', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('fullName', node)
if value is not None and 'fullName' not in already_processed:
already_processed.add('fullName')
self.fullName = value
value = find_attr_value_('version', node)
if value is not None and 'version' not in already_processed:
already_processed.add('version')
self.version = value
value = find_attr_value_('uri', node)
if value is not None and 'uri' not in already_processed:
already_processed.add('uri')
self.uri = value
value = find_attr_value_('ID', node)
if value is not None and 'ID' not in already_processed:
already_processed.add('ID')
self.ID = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class CVType
class embeddedStylesheetListType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, anytypeobjs_=None):
self.original_tagname_ = None
if anytypeobjs_ is None:
self.anytypeobjs_ = []
else:
self.anytypeobjs_ = anytypeobjs_
def factory(*args_, **kwargs_):
if embeddedStylesheetListType.subclass:
return embeddedStylesheetListType.subclass(*args_, **kwargs_)
else:
return embeddedStylesheetListType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_anytypeobjs_(self): return self.anytypeobjs_
def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
def insert_anytypeobjs_(self, index, value): self.anytypeobjs_.insert(index, value)
def hasContent_(self):
if (
self.anytypeobjs_
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='embeddedStylesheetListType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='embeddedStylesheetListType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='embeddedStylesheetListType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='embeddedStylesheetListType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='embeddedStylesheetListType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for obj_ in self.anytypeobjs_:
outfile.write(obj_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
obj_ = self.gds_build_any(child_, 'embeddedStylesheetListType')
if obj_ is not None:
self.add_anytypeobjs_(obj_)
# end class embeddedStylesheetListType
class thresholdType(CVParamType):
subclass = None
superclass = CVParamType
def __init__(self, name=None, unitName=None, value=None, unitAccession=None, unitCvRef=None, description=None, cvRef=None, accession=None, thresholdFilename=None):
self.original_tagname_ = None
super(thresholdType, self).__init__(name, unitName, value, unitAccession, unitCvRef, description, cvRef, accession, )
self.thresholdFilename = _cast(None, thresholdFilename)
def factory(*args_, **kwargs_):
if thresholdType.subclass:
return thresholdType.subclass(*args_, **kwargs_)
else:
return thresholdType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_thresholdFilename(self): return self.thresholdFilename
def set_thresholdFilename(self, thresholdFilename): self.thresholdFilename = thresholdFilename
def hasContent_(self):
if (
super(thresholdType, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='thresholdType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='thresholdType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, | |
result = cursor.fetchone()
if result:
try:
if listener:
low = result['Roll_low']
high = result['Roll_high']
listener.item_rolled(purpose, low, high, strength)
except IndexError as ex:
return None
else:
#print('No result for roll', roll)
return None
if ENABLE_CACHE:
if self.cache_style == 1:
line = {}
line['low'] = result['Roll_low']
line['high'] = result['Roll_high']
line['result'] = result
self.cache[strength].append(line)
elif self.cache_style == 2:
for i in range(int(result['Roll_low']), int(result['Roll_high']) + 1):
self.cache[strength][i] = result
return result
def find_flat_custom(self, conn, where, where_vars):
sql = 'SELECT * FROM {0} '.format(self.table) + where
cursor = conn.cursor()
if where_vars != None:
cursor.execute(sql, where_vars)
else:
cursor.execute(sql)
rows = cursor.fetchall()
collected = []
total_rollspace = 0
for row in rows:
roll_low = row['Roll_low']
roll_high = row['Roll_high']
rollspace = (roll_high - roll_low) + 1
collected.append( (total_rollspace + rollspace, row) )
total_rollspace += rollspace
# randint is inclusive at both ends, which would occasionally produce a roll
# equal to total_rollspace and fall past every row; randrange keeps the draw
# in [0, total_rollspace).
if total_rollspace <= 0:
return None
fake_roll = random.randrange(total_rollspace)
for row in collected:
if fake_roll < row[0]:
return row[1]
return None
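# Illustrative sketch (not part of the original generator): the weighted
# pick in find_flat_custom gives each row (Roll_high - Roll_low + 1) slots
# of the total roll space, so a uniform draw over the cumulative total
# reproduces the table weighting. With assumed rows
#
#   {'Roll_low': 1,  'Roll_high': 60}    # 60 slots
#   {'Roll_low': 61, 'Roll_high': 100}   # 40 slots
#
# the first row is chosen with probability 60/100 and the second with 40/100.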
# Ultimate Equipment Tables
TABLE_MAGIC_ARMOR_AND_SHIELDS = Table('Magic_Armor_and_Shields')
TABLE_MAGIC_WEAPONS = Table('Magic_Weapons')
TABLE_METAMAGIC_RODS_1 = Table('Metamagic_Rods_1')
TABLE_METAMAGIC_RODS_2 = Table('Metamagic_Rods_2')
TABLE_METAMAGIC_RODS_3 = Table('Metamagic_Rods_3')
TABLE_POTION_OR_OIL_LEVEL_0 = Table('Potion_or_Oil_Level_0')
TABLE_POTION_OR_OIL_LEVEL_1 = Table('Potion_or_Oil_Level_1')
TABLE_POTION_OR_OIL_LEVEL_2 = Table('Potion_or_Oil_Level_2')
TABLE_POTION_OR_OIL_LEVEL_3 = Table('Potion_or_Oil_Level_3')
TABLE_POTION_OR_OIL_TYPE = Table('Potion_or_Oil_Type')
TABLE_RANDOM_ARMOR_OR_SHIELD = Table('Random_Armor_or_Shield')
TABLE_RANDOM_ART_OBJECTS = Table('Random_Art_Objects')
TABLE_RANDOM_GEMS = Table('Random_Gems')
TABLE_RANDOM_POTIONS_AND_OILS = Table('Random_Potions_and_Oils')
TABLE_RANDOM_SCROLLS = Table('Random_Scrolls')
TABLE_RANDOM_WANDS = Table('Random_Wands')
TABLE_RANDOM_WEAPON = Table('Random_Weapon')
TABLE_RINGS = Table('Rings')
TABLE_RODS = Table('Rods')
TABLE_SCROLLS_ARCANE_LEVEL_0 = Table('Scrolls_Arcane_Level_0')
TABLE_SCROLLS_ARCANE_LEVEL_1 = Table('Scrolls_Arcane_Level_1')
TABLE_SCROLLS_ARCANE_LEVEL_2 = Table('Scrolls_Arcane_Level_2')
TABLE_SCROLLS_ARCANE_LEVEL_3 = Table('Scrolls_Arcane_Level_3')
TABLE_SCROLLS_ARCANE_LEVEL_4 = Table('Scrolls_Arcane_Level_4')
TABLE_SCROLLS_ARCANE_LEVEL_5 = Table('Scrolls_Arcane_Level_5')
TABLE_SCROLLS_ARCANE_LEVEL_6 = Table('Scrolls_Arcane_Level_6')
TABLE_SCROLLS_ARCANE_LEVEL_7 = Table('Scrolls_Arcane_Level_7')
TABLE_SCROLLS_ARCANE_LEVEL_8 = Table('Scrolls_Arcane_Level_8')
TABLE_SCROLLS_ARCANE_LEVEL_9 = Table('Scrolls_Arcane_Level_9')
TABLE_SCROLLS_DIVINE_LEVEL_0 = Table('Scrolls_Divine_Level_0')
TABLE_SCROLLS_DIVINE_LEVEL_1 = Table('Scrolls_Divine_Level_1')
TABLE_SCROLLS_DIVINE_LEVEL_2 = Table('Scrolls_Divine_Level_2')
TABLE_SCROLLS_DIVINE_LEVEL_3 = Table('Scrolls_Divine_Level_3')
TABLE_SCROLLS_DIVINE_LEVEL_4 = Table('Scrolls_Divine_Level_4')
TABLE_SCROLLS_DIVINE_LEVEL_5 = Table('Scrolls_Divine_Level_5')
TABLE_SCROLLS_DIVINE_LEVEL_6 = Table('Scrolls_Divine_Level_6')
TABLE_SCROLLS_DIVINE_LEVEL_7 = Table('Scrolls_Divine_Level_7')
TABLE_SCROLLS_DIVINE_LEVEL_8 = Table('Scrolls_Divine_Level_8')
TABLE_SCROLLS_DIVINE_LEVEL_9 = Table('Scrolls_Divine_Level_9')
TABLE_SCROLL_TYPE = Table('Scroll_Type')
TABLE_SPECIAL_ABILITIES_AMMUNITION = Table('Special_Abilities_Ammunition')
TABLE_SPECIAL_ABILITIES_ARMOR = Table('Special_Abilities_Armor')
TABLE_SPECIAL_ABILITIES_MELEE_WEAPON = Table('Special_Abilities_Melee_Weapon')
TABLE_SPECIAL_ABILITIES_RANGED_WEAPON = Table('Special_Abilities_Ranged_Weapon')
TABLE_SPECIAL_ABILITIES_SHIELD = Table('Special_Abilities_Shield')
TABLE_SPECIAL_BANE = Table('Special_Bane')
TABLE_SPECIAL_SLAYING_ARROW = Table('Special_Slaying_Arrow')
TABLE_SPECIFIC_ARMOR = Table('Specific_Armor')
TABLE_SPECIFIC_CURSED_ITEMS = Table('Specific_Cursed_Items')
TABLE_SPECIFIC_SHIELDS = Table('Specific_Shields')
TABLE_SPECIFIC_WEAPONS = Table('Specific_Weapons')
TABLE_STAVES = Table('Staves')
TABLE_WAND_LEVEL_0 = Table('Wand_Level_0')
TABLE_WAND_LEVEL_1 = Table('Wand_Level_1')
TABLE_WAND_LEVEL_2 = Table('Wand_Level_2')
TABLE_WAND_LEVEL_3 = Table('Wand_Level_3')
TABLE_WAND_LEVEL_4 = Table('Wand_Level_4')
TABLE_WAND_TYPE = Table('Wand_Type')
TABLE_WONDROUS_ITEMS = Table('Wondrous_Items')
TABLE_WONDROUS_ITEMS_BELT = Table('Wondrous_Items_Belt')
TABLE_WONDROUS_ITEMS_BODY = Table('Wondrous_Items_Body')
TABLE_WONDROUS_ITEMS_CHEST = Table('Wondrous_Items_Chest')
TABLE_WONDROUS_ITEMS_EYES = Table('Wondrous_Items_Eyes')
TABLE_WONDROUS_ITEMS_FEET = Table('Wondrous_Items_Feet')
TABLE_WONDROUS_ITEMS_HANDS = Table('Wondrous_Items_Hands')
TABLE_WONDROUS_ITEMS_HEAD = Table('Wondrous_Items_Head')
TABLE_WONDROUS_ITEMS_HEADBAND = Table('Wondrous_Items_Headband')
TABLE_WONDROUS_ITEMS_NECK = Table('Wondrous_Items_Neck')
TABLE_WONDROUS_ITEMS_SHOULDERS = Table('Wondrous_Items_Shoulders')
TABLE_WONDROUS_ITEMS_SLOTLESS = Table('Wondrous_Items_Slotless')
TABLE_WONDROUS_ITEMS_WRISTS = Table('Wondrous_Items_Wrists')
class Item(object):
#
# Methods that are not meant to be overridden
# Initializes object variables
def __init__(self, kind):
# The kind of item, stored mainly so a subclass doesn't need to know
# its own name.
self.kind = kind
# All the rolls that led to the selection of the item
self.rolls = []
# The item label, before any additions, which subclasses track on
# their own.
self.label = ''
# Generation parameters
# Strength: lesser/greater + major/medium/minor
self.strength = ''
# Roller
self.roller = None
# Subtype (when Wondrous)
self.subtype = ''
# Validity flag (used in enumeration mode)
self.bad_item = False
# Price
self.price = None
# Generates the item, referring to the subclass, following the Template
# Method design pattern.
def generate(self, conn, strength, roller, listener):
# Initialize generation parameters.
self.strength = strength
self.roller = roller
# Look up the item
self.lookup(conn, listener)
# The standard __str__ method
def __str__(self):
# If the subtype is already in the name, skip it.
if self.label.startswith(self.subtype):
s = self.label
else:
s = self.subtype + ': ' + self.label
if self.bad_item:
s += " [invalid]"
return s
# Return a dictionary describing the item.
def get_dict(self):
return {
'item' : unicode(self),
'value_num' : self.price.as_float() if self.price is not None else 0,
'value_str' : str(self.price if self.price is not None else '')
}
#
# Information on the finished item
def is_bad(self):
return self.bad_item
#
# Consider these "private"
# Rolls and keeps a log of the rolled values.
def roll(self, roll_expr, purpose):
roll = self.roller.roll(roll_expr, purpose)
self.rolls.append((roll_expr, roll))
return roll
# Removes the last roll from the log.
def unroll(self):
if len(self.rolls) > 0:
self.rolls.pop()
#
# Methods that are meant to be overridden
# The standard __repr__ method
def __repr__(self):
result = '<Item '
result += 'rolls:{} '.format(self.rolls)
result += 'label:{}'.format(self.label)
result += '>'
return result
class InvalidItem(Item):
def __init__(self, reason):
Item.__init__(self, KEY_INVALID)
self.label = reason
self.subtype = ''
class DatabaseItem(Item):
def __init__(self, subtype, item, price):
Item.__init__(self, KEY_DATABASE)
self.subtype = subtype
self.label = item
self.price = Price(price)
class Armor(Item):
def __init__(self):
Item.__init__(self, KEY_ARMOR)
# Load tables
self.t_random = TABLE_RANDOM_ARMOR_OR_SHIELD
self.t_magic = TABLE_MAGIC_ARMOR_AND_SHIELDS
self.t_specific_armor = TABLE_SPECIFIC_ARMOR
self.t_specials_armor = TABLE_SPECIAL_ABILITIES_ARMOR
self.t_specific_shield = TABLE_SPECIFIC_SHIELDS
self.t_specials_shield = TABLE_SPECIAL_ABILITIES_SHIELD
self.re_enhancement = re.compile('\+(\d+) armor or shield')
self.re_specials = re.compile('with (\w+) \+(\d+) special')
# Armor details
# Generic item or specific
self.is_generic = True
# Armor piece
self.armor_base = ''
# Armor or shield
self.armor_type = ''
# Mundane item price
self.armor_price = Price('0 gp')
# Raw enhancement bonus
self.enhancement = 0
# Dict of specials to costs
self.specials = {}
# Specific item name
self.specific_name = ''
# Specific item cost
self.specific_price = ''
def __repr__(self):
result = '<Armor'
result += '>'
return result
def lookup(self, conn, listener):
# We don't do 'least minor'
if self.strength == 'least minor':
self.strength = 'lesser minor'
# Roll for the item.
purpose = 'armor type'
roll = self.roll('1d100', purpose)
# Look up the roll.
rolled_armor = self.t_random.find_roll(conn, roll, None, purpose, listener)
self.armor_base = rolled_armor['Result']
self.armor_type = rolled_armor['Type']
self.armor_price = rolled_armor['Price']
self.enhancement = 0
# Roll for the magic property.
purpose = 'armor magic property'
roll = self.roll('1d100', purpose)
rolled_magic = self.t_magic.find_roll(conn, roll, self.strength, purpose, listener)
magic_type = rolled_magic['Result']
# Handle it
if magic_type.endswith('specific armor or shield'):
self.make_specific(conn, listener)
else:
self.make_generic(conn, magic_type, listener)
# Subtype
self.subtype = ''
if self.armor_type == 'armor':
self.subtype = 'Armor'
elif self.armor_type == 'shield':
self.subtype = 'Shield'
else:
self.subtype = 'Armor/Shield'
# Item specifics
if self.is_generic:
# Compose the label.
self.label = self.armor_base
if self.enhancement > 0:
self.label += ' +' + str(self.enhancement)
for spec in sorted(self.specials.keys()):
self.label += '/' + spec
# Compose a price.
# Start with the base cost.
self.price = Price(self.armor_price, self.armor_type)
# Add magic costs.
if self.enhancement:
# Masterwork component
self.price.add(150)
# Initial enhancement bonus
self.price.add_enhancement(self.enhancement)
# Special costs
for spec in self.specials.keys():
self.price.add(self.specials[spec])
else:
# Specific magic armor. Just copy the details.
self.label += self.specific_name
self.price = Price(self.specific_price)
def make_generic(self, conn, specification, listener):
self.is_generic = True
# "Regular" magic item, with an assortment of bonuses. We already
# know what we need in the specification param.
special_count = 0
special_strength = 0
# This part is always at the beginning
match = self.re_enhancement.match(specification)
if match:
self.enhancement = int(match.group(1))
# This might be in the middle of the string
match = self.re_specials.search(specification)
if match:
special_count = {'one': 1, 'two': 2}[match.group(1)]
special_strength = '+' + match.group(2)
# Add specials!
for i in range(special_count):
# Generate a special.
result = self.generate_special(conn, special_strength, listener)
if result == None:
# Mark as bad item.
self.bad_item = True
return
# At this point, the special is good. It may be a repeat, which
# should be ignored, according to CRB.
special = result['Result']
price = result['Price']
self.specials[special] = price
# Filter out weaker versions of other specials.
filter_specials(self.specials)
def generate_special(self, conn, special_strength, listener):
# Roll for a special.
purpose = 'armor special ability ' + str(len(self.specials.keys()) + 1)
roll = self.roll('1d100', purpose)
# Look it up.
result = None
if self.armor_type == 'armor':
result = self.t_specials_armor.find_roll(conn, roll,
special_strength, purpose, listener)
else:
result = self.t_specials_shield.find_roll(conn, roll,
special_strength, purpose, listener)
special = result['Result']
price = result['Price']
return result
def make_specific(self, conn, listener):
# Specific
self.is_generic = False
# Roll for the specific armor.
purpose = 'specific magic armor'
roll = self.roll('1d100', purpose)
# Look it up.
result = None
if self.armor_type == 'armor':
result = self.t_specific_armor.find_roll(conn, roll,
self.strength, purpose, listener)
else:
result = self.t_specific_shield.find_roll(conn, roll,
self.strength, purpose, listener)
self.specific_name = result['Result']
self.specific_price = result['Price']
class Weapon(Item):
def __init__(self):
Item.__init__(self, KEY_ARMOR)  # NOTE: same kind key as Armor; a weapon-specific key may have been intended here.
# Load tables
self.t_random = TABLE_RANDOM_WEAPON
self.t_magic = TABLE_MAGIC_WEAPONS
self.t_specific_weapon = TABLE_SPECIFIC_WEAPONS
self.t_specials_melee = TABLE_SPECIAL_ABILITIES_MELEE_WEAPON
self.t_specials_ranged = TABLE_SPECIAL_ABILITIES_RANGED_WEAPON
self.re_enhancement = re.compile('\+(\d+) weapon')
# Expression for:
# with one +X special ability
# with two +X special abilities
self.re_specials = re.compile(' (\w+) \+(\d+) special')
# Weapon details
# Generic item or specific
self.is_generic = True
# Weapon type
self.weapon_base = ''
# Melee, ranged, ammunition
self.weapon_type = ''
# Light, one-hand, or two-hand
self.wield_type = ''
# Mundane item price
self.weapon_price = '0 gp'
| |
# plugins/hg4idea/testData/bin/hgext/graphlog.py
# ASCII graph log extension for Mercurial
#
# Copyright 2007 <NAME> <<EMAIL>>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''command to view revision graphs from a shell
This extension adds a --graph option to the incoming, outgoing and log
commands. When this option is given, an ASCII representation of the
revision graph is also shown.
'''
import os
from mercurial.cmdutil import revrange, show_changeset
from mercurial.commands import templateopts
from mercurial.i18n import _
from mercurial.node import nullrev
from mercurial import bundlerepo, changegroup, cmdutil, commands, extensions
from mercurial import hg, url, util, graphmod
ASCIIDATA = 'ASC'
def asciiedges(seen, rev, parents):
"""adds edge info to changelog DAG walk suitable for ascii()"""
if rev not in seen:
seen.append(rev)
nodeidx = seen.index(rev)
knownparents = []
newparents = []
for parent in parents:
if parent in seen:
knownparents.append(parent)
else:
newparents.append(parent)
ncols = len(seen)
seen[nodeidx:nodeidx + 1] = newparents
edges = [(nodeidx, seen.index(p)) for p in knownparents]
if len(newparents) > 0:
edges.append((nodeidx, nodeidx))
if len(newparents) > 1:
edges.append((nodeidx, nodeidx + 1))
nmorecols = len(seen) - ncols
return nodeidx, edges, ncols, nmorecols
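# Illustrative sketch (not part of the original extension): what
# asciiedges() yields for a simple linear history (revision numbers are
# assumed):
#
#   seen = []
#   asciiedges(seen, 2, [1])  ->  (0, [(0, 0)], 1, 0)
#   asciiedges(seen, 1, [0])  ->  (0, [(0, 0)], 1, 0)
#
# i.e. the node sits in column 0, has a single edge straight down to its
# parent, there is one ongoing column, and the column count does not change.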
def fix_long_right_edges(edges):
for (i, (start, end)) in enumerate(edges):
if end > start:
edges[i] = (start, end + 1)
def get_nodeline_edges_tail(
node_index, p_node_index, n_columns, n_columns_diff, p_diff, fix_tail):
if fix_tail and n_columns_diff == p_diff and n_columns_diff != 0:
# Still going in the same non-vertical direction.
if n_columns_diff == -1:
start = max(node_index + 1, p_node_index)
tail = ["|", " "] * (start - node_index - 1)
tail.extend(["/", " "] * (n_columns - start))
return tail
else:
return ["\\", " "] * (n_columns - node_index - 1)
else:
return ["|", " "] * (n_columns - node_index - 1)
def draw_edges(edges, nodeline, interline):
for (start, end) in edges:
if start == end + 1:
interline[2 * end + 1] = "/"
elif start == end - 1:
interline[2 * start + 1] = "\\"
elif start == end:
interline[2 * start] = "|"
else:
nodeline[2 * end] = "+"
if start > end:
(start, end) = (end, start)
for i in range(2 * start + 1, 2 * end):
if nodeline[i] != "+":
nodeline[i] = "-"
def get_padding_line(ni, n_columns, edges):
line = []
line.extend(["|", " "] * ni)
if (ni, ni - 1) in edges or (ni, ni) in edges:
# (ni, ni - 1) (ni, ni)
# | | | | | | | |
# +---o | | o---+
# | | c | | c | |
# | |/ / | |/ /
# | | | | | |
c = "|"
else:
c = " "
line.extend([c, " "])
line.extend(["|", " "] * (n_columns - ni - 1))
return line
def asciistate():
"""returns the initial value for the "state" argument to ascii()"""
return [0, 0]
def ascii(ui, state, type, char, text, coldata):
"""prints an ASCII graph of the DAG
takes the following arguments (one call per node in the graph):
- ui to write to
- Somewhere to keep the needed state in (init to asciistate())
- Column of the current node in the set of ongoing edges.
- Type indicator of node data == ASCIIDATA.
- Payload: (char, lines):
- Character to use as node's symbol.
- List of lines to display as the node's text.
- Edges; a list of (col, next_col) indicating the edges between
the current node and its parents.
- Number of columns (ongoing edges) in the current revision.
- The difference between the number of columns (ongoing edges)
in the next revision and the number of columns (ongoing edges)
in the current revision. That is: -1 means one column removed;
0 means no columns added or removed; 1 means one column added.
"""
idx, edges, ncols, coldiff = coldata
assert -2 < coldiff < 2
if coldiff == -1:
# Transform
#
# | | | | | |
# o | | into o---+
# |X / |/ /
# | | | |
fix_long_right_edges(edges)
# add_padding_line says whether to rewrite
#
# | | | | | | | |
# | o---+ into | o---+
# | / / | | | # <--- padding line
# o | | | / /
# o | |
add_padding_line = (len(text) > 2 and coldiff == -1 and
[x for (x, y) in edges if x + 1 < y])
# fix_nodeline_tail says whether to rewrite
#
# | | o | | | | o | |
# | | |/ / | | |/ /
# | o | | into | o / / # <--- fixed nodeline tail
# | |/ / | |/ /
# o | | o | |
fix_nodeline_tail = len(text) <= 2 and not add_padding_line
# nodeline is the line containing the node character (typically o)
nodeline = ["|", " "] * idx
nodeline.extend([char, " "])
nodeline.extend(
get_nodeline_edges_tail(idx, state[1], ncols, coldiff,
state[0], fix_nodeline_tail))
# shift_interline is the line containing the non-vertical
# edges between this entry and the next
shift_interline = ["|", " "] * idx
if coldiff == -1:
n_spaces = 1
edge_ch = "/"
elif coldiff == 0:
n_spaces = 2
edge_ch = "|"
else:
n_spaces = 3
edge_ch = "\\"
shift_interline.extend(n_spaces * [" "])
shift_interline.extend([edge_ch, " "] * (ncols - idx - 1))
# draw edges from the current node to its parents
draw_edges(edges, nodeline, shift_interline)
# lines is the list of all graph lines to print
lines = [nodeline]
if add_padding_line:
lines.append(get_padding_line(idx, ncols, edges))
lines.append(shift_interline)
# make sure that there are as many graph lines as there are
# log strings
while len(text) < len(lines):
text.append("")
if len(lines) < len(text):
extra_interline = ["|", " "] * (ncols + coldiff)
while len(lines) < len(text):
lines.append(extra_interline)
# print lines
indentation_level = max(ncols, ncols + coldiff)
for (line, logstr) in zip(lines, text):
ln = "%-*s %s" % (2 * indentation_level, "".join(line), logstr)
ui.write(ln.rstrip() + '\n')
# ... and start over
state[0] = coldiff
state[1] = idx
def get_revs(repo, rev_opt):
if rev_opt:
revs = revrange(repo, rev_opt)
return (max(revs), min(revs))
else:
return (len(repo) - 1, 0)
def check_unsupported_flags(opts):
for op in ["follow", "follow_first", "date", "copies", "keyword", "remove",
"only_merges", "user", "only_branch", "prune", "newest_first",
"no_merges", "include", "exclude"]:
if op in opts and opts[op]:
raise util.Abort(_("--graph option is incompatible with --%s")
% op.replace("_", "-"))
def generate(ui, dag, displayer, showparents, edgefn):
seen, state = [], asciistate()
for rev, type, ctx, parents in dag:
char = ctx.node() in showparents and '@' or 'o'
displayer.show(ctx)
lines = displayer.hunk.pop(rev).split('\n')[:-1]
ascii(ui, state, type, char, lines, edgefn(seen, rev, parents))
def graphlog(ui, repo, path=None, **opts):
"""show revision history alongside an ASCII revision graph
Print a revision history alongside a revision graph drawn with
ASCII characters.
Nodes printed as an @ character are parents of the working
directory.
"""
check_unsupported_flags(opts)
limit = cmdutil.loglimit(opts)
start, stop = get_revs(repo, opts["rev"])
if start == nullrev:
return
if path:
path = util.canonpath(repo.root, os.getcwd(), path)
if path: # could be reset in canonpath
revdag = graphmod.filerevs(repo, path, start, stop, limit)
else:
if limit is not None:
stop = max(stop, start - limit + 1)
revdag = graphmod.revisions(repo, start, stop)
displayer = show_changeset(ui, repo, opts, buffered=True)
showparents = [ctx.node() for ctx in repo[None].parents()]
generate(ui, revdag, displayer, showparents, asciiedges)
def graphrevs(repo, nodes, opts):
limit = cmdutil.loglimit(opts)
nodes.reverse()
if limit is not None:
nodes = nodes[:limit]
return graphmod.nodes(repo, nodes)
def goutgoing(ui, repo, dest=None, **opts):
"""show the outgoing changesets alongside an ASCII revision graph
Print the outgoing changesets alongside a revision graph drawn with
ASCII characters.
Nodes printed as an @ character are parents of the working
directory.
"""
check_unsupported_flags(opts)
dest = ui.expandpath(dest or 'default-push', dest or 'default')
dest, branches = hg.parseurl(dest, opts.get('branch'))
revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
other = hg.repository(cmdutil.remoteui(ui, opts), dest)
if revs:
revs = [repo.lookup(rev) for rev in revs]
ui.status(_('comparing with %s\n') % url.hidepassword(dest))
o = repo.findoutgoing(other, force=opts.get('force'))
if not o:
ui.status(_("no changes found\n"))
| |
spks_i = Tools.timeToIndex(spks, self.dt)
# Compute adaptation current (sum of eta triggered at spike times in spks)
eta_sum = np.array(np.zeros(int(p_T + 1.1*p_eta_l + p_Tref_i)), dtype="double")
for s in spks_i :
eta_sum[s + 1 + p_Tref_i : s + 1 + p_Tref_i + p_eta_l] += p_eta
eta_sum = eta_sum[:p_T]
# Set initial condition
V[0] = V0
code = """
#include <math.h>
int T_ind = int(p_T);
float dt = float(p_dt);
float gl = float(p_gl);
float C = float(p_C);
float El = float(p_El);
float Vr = float(p_Vr);
int Tref_ind = int(float(p_Tref)/dt);
int next_spike = spks_i[0] + Tref_ind;
int spks_cnt = 0;
for (int t=0; t<T_ind-1; t++) {
// INTEGRATE VOLTAGE
V[t+1] = V[t] + dt/C*( -gl*(V[t] - El) + I[t] - eta_sum[t] );
if ( t == next_spike ) {
spks_cnt = spks_cnt + 1;
next_spike = spks_i[spks_cnt] + Tref_ind;
V[t-1] = 0 ;
V[t] = Vr ;
t=t-1;
}
}
"""
vars = [ 'p_T','p_dt','p_gl','p_C','p_El','p_Vr','p_Tref','V','I','eta_sum','spks_i' ]
v = weave.inline(code, vars)
time = np.arange(p_T)*self.dt
eta_sum = eta_sum[:p_T]
return (time, V, eta_sum)
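# Illustrative sketch (not part of the original model code): the embedded C
# loop above is a forward-Euler integration of
#
#   C * dV/dt = -gl*(V - El) + I(t) - eta_sum(t)
#
# with V reset to Vr (and held for Tref) at each forced spike time. A pure
# NumPy version of a single step would read, roughly:
#
#   V[t+1] = V[t] + dt/C * (-gl*(V[t] - El) + I[t] - eta_sum[t])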
def fit(self, experiment, DT_beforeSpike = 5.0):
"""
Fit the GIF model on experimental data.
The experimental data are stored in the object experiment.
The parameter DT_beforeSpike (in ms) defines the region that is cut before each spike when fitting the subthreshold dynamics of the membrane potential.
Only training set traces in experiment are used to perform the fit.
"""
# Three step procedure used for parameters extraction
print "\n################################"
print "# Fit GIF"
print "################################\n"
self.fitVoltageReset(experiment, self.Tref, do_plot=False)
(var_explained_dV, var_explained_V) = self.fitSubthresholdDynamics(experiment, DT_beforeSpike=DT_beforeSpike)
self.fitStaticThreshold(experiment)
self.fitThresholdDynamics(experiment)
return (var_explained_dV, var_explained_V)
########################################################################################################
# FIT VOLTAGE RESET GIVEN ABSOLUTE REFRACTORY PERIOD (step 1)
########################################################################################################
def fitVoltageReset(self, experiment, Tref, do_plot=False):
"""
Tref: ms, absolute refractory period.
The voltage reset is estimated by computing the spike-triggered average of the voltage.
"""
print "Estimate voltage reset (Tref = %0.1f ms)..." % (Tref)
# Fix absolute refractory period
self.dt = experiment.dt
self.Tref = Tref
all_spike_average = []
all_spike_nb = 0
for tr in experiment.trainingset_traces :
if tr.useTrace :
loc_count = 0
if len(tr.spks) > 0 :
(support, spike_average, spike_nb) = tr.computeAverageSpikeShape()
all_spike_average.append(spike_average)
all_spike_nb += spike_nb
loc_count += 1
print "Local counter = %d" % (loc_count)
spike_average = np.mean(all_spike_average, axis=0)
# Estimate voltage reset
Tref_ind = np.where(support >= self.Tref)[0][0]
self.Vr = spike_average[Tref_ind]
# Save average spike shape
self.avg_spike_shape = spike_average
self.avg_spike_shape_support = support
if do_plot :
plt.figure()
plt.plot(support, spike_average, 'black')
plt.plot([support[Tref_ind]], [self.Vr], '.', color='red')
plt.show()
print "Done! Vr = %0.2f mV (computed on %d spikes)" % (self.Vr, all_spike_nb)
########################################################################################################
# FUNCTIONS RELATED TO FIT OF SUBTHRESHOLD DYNAMICS (step 2)
########################################################################################################
def fitSubthresholdDynamics(self, experiment, DT_beforeSpike=5.0):
print "\nGIF MODEL - Fit subthreshold dynamics..."
# Expand eta in basis functions
self.dt = experiment.dt
self.eta.computeBins()
# Build X matrix and Y vector to perform linear regression (use all traces in training set)
X = []
Y = []
cnt = 0
for tr in experiment.trainingset_traces :
if tr.useTrace :
cnt += 1
reprint( "Compute X matrix for repetition %d" % (cnt) )
(X_tmp, Y_tmp) = self.fitSubthresholdDynamics_Build_Xmatrix_Yvector(tr, DT_beforeSpike=DT_beforeSpike)
X.append(X_tmp)
Y.append(Y_tmp)
# Concatenate matrices associated with different traces to perform a single multilinear regression
if cnt == 1:
X = X[0]
Y = Y[0]
elif cnt > 1:
X = np.concatenate(X, axis=0)
Y = np.concatenate(Y, axis=0)
else :
print "\nError, at least one training set trace should be selected to perform fit."
# Linear Regression
print "\nPerform linear regression..."
XTX = np.dot(np.transpose(X), X)
XTX_inv = inv(XTX)
XTY = np.dot(np.transpose(X), Y)
b = np.dot(XTX_inv, XTY)
b = b.flatten()
# Update and print model parameters
self.C = 1./b[1]
self.gl = -b[0]*self.C
self.El = b[2]*self.C/self.gl
self.eta.setFilter_Coefficients(-b[3:]*self.C)
self.printParameters()
# Compute percentage of variance explained on dV/dt
var_explained_dV = 1.0 - np.mean((Y - np.dot(X,b))**2)/np.var(Y)
print "Percentage of variance explained (on dV/dt): %0.2f" % (var_explained_dV*100.0)
# Compute percentage of variance explained on V
SSE = 0 # sum of squared errors
VAR = 0 # variance of data
for tr in experiment.trainingset_traces :
if tr.useTrace :
# Simulate subthreshold dynamics
(time, V_est, eta_sum_est) = self.simulateDeterministic_forceSpikes(tr.I, tr.V[0], tr.getSpikeTimes())
indices_tmp = tr.getROI_FarFromSpikes(DT_beforeSpike, self.Tref)
SSE += sum((V_est[indices_tmp] - tr.V[indices_tmp])**2)
VAR += len(indices_tmp)*np.var(tr.V[indices_tmp])
var_explained_V = 1.0 - SSE / VAR
print "Percentage of variance explained (on V): %0.2f" % (var_explained_V*100.0)
return (var_explained_dV*100.0, var_explained_V*100.0)
def fitSubthresholdDynamics_Build_Xmatrix_Yvector(self, trace, DT_beforeSpike=5.0):
# Length of the voltage trace
Tref_ind = int(self.Tref/trace.dt)
# Select region where to perform linear regression
selection = trace.getROI_FarFromSpikes(DT_beforeSpike, self.Tref)
selection_l = len(selection)
# Build X matrix for linear regression
X = np.zeros( (selection_l, 3) )
# Fill first two columns of X matrix
X[:,0] = trace.V[selection]
X[:,1] = trace.I[selection]
X[:,2] = np.ones(selection_l)
# Compute and fill the remaining columns associated with the spike-triggered current eta
X_eta = self.eta.convolution_Spiketrain_basisfunctions(trace.getSpikeTimes() + self.Tref, trace.T, trace.dt)
X = np.concatenate( (X, X_eta[selection,:]), axis=1 )
# Build Y vector (voltage derivative)
# COULD BE A BETTER SOLUTION IN CASE OF EXPERIMENTAL DATA (NOT CLEAR WHY)
#Y = np.array( np.concatenate( ([0], np.diff(trace.V)/trace.dt) ) )[selection]
#Better approximation for the derivative (modification by AP, september 2017)
Y = np.gradient(trace.V, trace.dt)[selection]
# CORRECT SOLUTION TO FIT ARTIFICIAL DATA
#Y = np.array( np.concatenate( (np.diff(trace.V)/trace.dt, [0]) ) )[selection]
return (X, Y)
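# Illustrative sketch (not part of the original model code): the matrices
# built above feed the linear regression in fitSubthresholdDynamics,
#
#   dV/dt  ~=  X . b,   X = [V, I, 1, conv(spike train, eta basis)]
#
# so b[0] = -gl/C, b[1] = 1/C, b[2] = gl*El/C, and b[3:] are the eta basis
# coefficients scaled by -1/C, which is exactly how the fitted vector is
# unpacked in fitSubthresholdDynamics above.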
########################################################################################################
# FUNCTIONS RELATED TO FIT FIRING THRESHOLD PARAMETERS (step 3)
########################################################################################################
def fitStaticThreshold(self, experiment):
self.setDt(experiment.dt)
# Start by fitting a constant firing threshold, the result is used as initial condition to fit dynamic threshold
print "\nFit static threshold...\n"
# Define initial conditions (based on the average firing rate in the training set)
nbSpikes = 0
duration = 0
for tr in experiment.trainingset_traces :
if tr.useTrace :
nbSpikes += tr.getSpikeNb_inROI()
duration += tr.getTraceLength_inROI()
mean_firingrate = 1000.0*nbSpikes/duration
self.lambda0 = 1.0
self.DV = 50.0
self.Vt_star = -np.log(mean_firingrate)*self.DV
# Perform fit
beta0_staticThreshold = [1/self.DV, -self.Vt_star/self.DV]
beta_opt = self.maximizeLikelihood(experiment, beta0_staticThreshold, self.buildXmatrix_staticThreshold)
# Store result
self.DV = 1.0/beta_opt[0]
self.Vt_star = -beta_opt[1]*self.DV
self.gamma.setFilter_toZero()
self.printParameters()
def fitThresholdDynamics(self, experiment):
self.setDt(experiment.dt)
# Fit a dynamic threshold using a initial condition the result obtained by fitting a static threshold
print "\nFit dynamic threshold...\n"
# Perform fit
beta0_dynamicThreshold = np.concatenate( ( [1/self.DV], [-self.Vt_star/self.DV], self.gamma.getCoefficients()/self.DV))
beta_opt = self.maximizeLikelihood(experiment, beta0_dynamicThreshold, self.buildXmatrix_dynamicThreshold)
# Store result
self.DV = 1.0/beta_opt[0]
self.Vt_star = -beta_opt[1]*self.DV
self.gamma.setFilter_Coefficients(-beta_opt[2:]*self.DV)
self.printParameters()
def maximizeLikelihood(self, experiment, beta0, buildXmatrix, maxIter=10**3, stopCond=10**-6) :
"""
Maximize likelihood. This function can be used to fit any model of the form lambda=exp(Xbeta).
Here this function is used to fit both:
- static threshold
- dynamic threshold
The difference between the two functions is in the size of beta0 and the returned beta, as well
as the function buildXmatrix.
"""
# Precompute all the matrices used in the gradient ascent
all_X = []
all_X_spikes = []
all_sum_X_spikes = []
T_tot = 0.0
N_spikes_tot = 0.0
traces_nb = 0
for tr in experiment.trainingset_traces:
if tr.useTrace :
traces_nb += 1
# Simulate subthreshold dynamics
(time, V_est, eta_sum_est) = self.simulateDeterministic_forceSpikes(tr.I, tr.V[0], tr.getSpikeTimes())
# Precomputes matrices to perform gradient ascent on log-likelihood
(X_tmp, X_spikes_tmp, sum_X_spikes_tmp, N_spikes, T) = buildXmatrix(tr, V_est)
T_tot += T
N_spikes_tot += N_spikes
all_X.append(X_tmp)
all_X_spikes.append(X_spikes_tmp)
all_sum_X_spikes.append(sum_X_spikes_tmp)
logL_poisson = N_spikes_tot*(np.log(N_spikes_tot/T_tot)-1)
# Perform gradient ascent
print "Maximize log-likelihood (bit/spks)..."
beta = beta0
old_L = 1
for i in range(maxIter) :
learning_rate = 1.0
if i<=10 : # be careful in the first iterations (using a small learning rate in the first step makes the fit more stable)
learning_rate = 0.1
L=0; G=0; H=0;
for trace_i in np.arange(traces_nb):
(L_tmp,G_tmp,H_tmp) = self.computeLikelihoodGradientHessian(beta, all_X[trace_i], all_X_spikes[trace_i], all_sum_X_spikes[trace_i])
L+=L_tmp; G+=G_tmp; H+=H_tmp;
beta = beta - learning_rate*np.dot(inv(H),G)
if (i>0 and abs((L-old_L)/old_L) < stopCond) : # If converged
print "\nConverged after %d iterations!\n" % (i+1)
break
old_L = L
# Compute normalized likelihood (for print)
# The likelihood is normalized with respect to a poisson process and units are in bit/spks
L_norm = (L-logL_poisson)/np.log(2)/N_spikes_tot
reprint(L_norm)
if (i==maxIter - 1) : # If too many iterations
print "\nNot converged after %d iterations.\n" % (maxIter)
return beta
def computeLikelihoodGradientHessian(self, beta, X, X_spikes, sum_X_spikes) :
# IMPORTANT: in general we assume that the | |
for i, e in enumerate(c2l):
#e1 = e; #.copy();
e.append(i);
c2lSorted.append(e);
c2lSorted.sort();
#common.DebugPrint("unique(): c2lSorted = %s" % str(c2lSorted));
c2lSortedIndex = [];
for e in c2lSorted:
c2lSortedIndex.append(e[2]);
#common.DebugPrint("unique(): c2lSortedIndex = %s" % str(c2lSortedIndex));
#quit();
"""
np.argsort() is a crappy function in the end...
Unfortunately I don't understand the output of np.argsort() when a is
bi/multi-dimensional... - and it's not my fault - see
http://stackoverflow.com/questions/12496531/sort-numpy-float-array-column-by-column
and http://docs.scipy.org/doc/numpy/reference/generated/numpy.argsort.html
"""
if False:
#c2i = []
# Does NOT return c2i: c2 = np.sort(a=c2, axis=0, kind="quicksort")
c2i = np.argsort(a=c2, axis=0, kind="quicksort");
assert len(c2i) == len(c2);
if common.MY_DEBUG_STDOUT:
common.DebugPrint("unique(): c2i = %s" % str(c2i));
c2i = c2i[:, 0]; # c2i returned is a list of lists of 2 elements: c2i[k][1] is the index of the kth element in c2i
c2i = np.array(c2lSortedIndex);
#common.DebugPrint("unique(): c2i = %s" % str(c2i));
# This is not good since the last element is the position: c2 = np.array(c2lSorted);
try:
c2 = c2[c2i];
except:
#common.DebugPrint("c2 = %s" % str(c2))
#common.DebugPrint("c2i = %s" % str(c2i))
return np.array([]), np.array([]);
#quit()
#common.DebugPrint("unique(): c2 = %s" % str(c2));
#common.DebugPrint("c2 (after indirect sorting) = %s" % str(c2));
c2F = []; #np.array([])
c2iF = [];
#rowFirst = 0;
#for row in range(c2i.size - 1):
row = 0;
#for row in range(c2i.size - 1):
while row < c2i.size:
#print "row = %d" % row;
c2F.append(c2[row].tolist());
c2iF.append(c2i[row]);
#common.DebugPrint("row = %d" % row);
if row + 1 < c2i.size:
rowCrt = row + 1;
while rowCrt < c2i.size:
#common.DebugPrint("rowCrt = %d" % rowCrt);
"""
# This comparison is NOT general - it assumes
# np.dims(c2) == (X, 2).
if (c2[row][0] == c2[rowCrt][0]) and \
(c2[row][1] == c2[rowCrt][1]):
"""
"""
Test that the 2 rows are identical
(each pair of corresponding elements are equal)
"""
if (c2[row] == c2[rowCrt]).all():
pass;
else:
break;
rowCrt += 1;
row = rowCrt;
else:
row += 1;
"""
#for row in range(c2i.size - 1):
while row < c2i.size:
#print "row = %d" % row
if row == c2i.size - 1:
c2F.append(c2[row].tolist());
c2iF.append(c2i[row]);
for rowCrt in range(row + 1, c2i.size):
#print "rowCrt = %d" % rowCrt
if (c2[row][0] == c2[rowCrt][0]) and \
(c2[row][1] == c2[rowCrt][1]):
row += 1;
pass;
else:
c2F.append(c2[row].tolist());
c2iF.append(c2i[row]);
#rowFirst += 1;
break
row += 1;
"""
#print "unique(): c2F = %s" % (str(c2F));
#print "unique(): c2iF = %s" % (str(c2iF));
c2F = np.array(c2F);
c2iF = np.array(c2iF);
return c2F, c2iF;
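# Illustrative sketch (not part of the original source): unique() above is
# a hand-rolled stand-in for MATLAB's unique(A, 'rows'). For an assumed
# 2-column input
#
#   a = np.array([[3, 1], [1, 2], [3, 1]])
#
# it is expected to return the lexicographically sorted distinct rows
# [[1, 2], [3, 1]] together with the original indices of the rows kept
# ([1, 0]).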
def hamming(N):
"""
Alex: replaced Matlab "Signal Processing Toolbox"'s hamming() with my own
simple definition - inspired from
http://www.mathworks.com/matlabcentral/newsreader/view_thread/102510
"""
t = np.array( range(N) )
b = np.zeros( (N) )
if N == 0:
return b;
elif N == 1:
b[0] = 1;
return b;
#print "hamming(): b.shape = %s" % str(b.shape);
#print "hamming(): t.shape = %s" % str(t.shape);
#b[t] = 0.54 - 0.46 * math.cos(2 * math.pi * (t - 1) / (N - 1));
b[t] = 0.54 - 0.46 * np.cos(2 * math.pi * t / float(N - 1));
return b;
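# Illustrative sketch (not part of the original source): the window above
# follows the standard Hamming definition
#
#   w[n] = 0.54 - 0.46 * cos(2*pi*n / (N - 1)),  n = 0..N-1
#
# so hamming(5) evaluates to approximately [0.08, 0.54, 1.0, 0.54, 0.08],
# matching numpy.hamming(5).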
"""
h is a vector... - see use below filter2(b, ...)
From Matlab help:
Syntax
Y = filter2(h,X)
Y = filter2(h,X,shape)
Description
Y = filter2(h,X) filters the data in X with the two-dimensional FIR
filter in the matrix h. It computes the result, Y, using two-dimensional
correlation, and returns the central part of the correlation that is the
same size as X.
Y = filter2(h,X,shape) returns the part of Y specified by the shape
parameter. shape is a string with one of these values: 'full', 'same'
(the default) or 'valid'.
Given a matrix X and a two-dimensional FIR filter h, filter2 rotates
your filter matrix 180 degrees to create a convolution kernel. It then
calls conv2, the two-dimensional convolution function, to implement the
filtering operation. filter2 uses conv2 to compute the full
two-dimensional convolution of the FIR filter with the input matrix. By
default, filter2 then extracts the central part of the convolution that
is the same size as the input matrix, and returns this as the result. If
the shape parameter specifies an alternate part of the convolution for
the result, filter2 returns the appropriate part.
"""
# https://stackoverflow.com/questions/16278938/convert-matlab-to-opencv-in-python
def filter2(window, src):
assert len(window.shape) == 1
# In certain cases it's unusual that we have a window that is 1D and x is 2D
#common.DebugPrint("filter2(): src.shape = %s" % str(src.shape))
#common.DebugPrint("filter2(): window.shape = %s" % str(window.shape))
#common.DebugPrint("filter2(): src = %s" % str(src))
#common.DebugPrint("filter2(): window = %s" % str(window))
# From http://docs.opencv.org/modules/imgproc/doc/filtering.html#cv.Filter2D
#cv2.filter2D(src, ddepth, kernel[, dst[, anchor[, delta[, borderType]]]]) -> dst
#res = cv2.filter2D(src=window, ddepth=-1, kernel=x)
#res = cv2.filter2D(src=x, ddepth=-1, kernel=window, borderType=cv2.BORDER_REFLECT) #cv2.BORDER_CONSTANT) #cv2.BORDER_ISOLATED) #cv2.BORDER_TRANSPARENT)
"""
# OpenCV's formula to compute filter2 is dst(x,y)= \sum...
As we can see they consider the matrix being in column-major order, so we need to transpose the matrices
Doing so we obtain the right values at the 4 borders.
"""
src = src.T;
#window = window.T
# Note: CvPoint is (x, y)
res = cv2.filter2D(src=src, ddepth=-1, kernel=window, anchor=(-1, -1), \
borderType=cv2.BORDER_ISOLATED); #BORDER_DEFAULT) #BORDER_CONSTANT) #cv2.BORDER_TRANSPARENT)
res = res.T;
if False:
# Alex's implementation following http://docs.opencv.org/modules/imgproc/doc/filtering.html#filter2d
dst = np.zeros( (src.shape[0], src.shape[1]), dtype=np.float64);
for y in range(src.shape[1]):
for x in range(src.shape[0]):
for xp in range(window.shape[0]):
"""
if (y >= src.shape[0]) or (x + xp - 1 >= src.shape[1]) or \
(x + xp - 1 < 0):
"""
if (y >= src.shape[1]) or (x + xp - 1 >= src.shape[0]) or \
(x + xp - 1 < 0):
pass;
else:
#dst[y, x] += window[xp] * src[y, x + xp - 1];
dst[x, y] += window[xp] * src[x + xp - 1, y];
res = dst;
return res;
"""
"""
if False:
range1 = src.shape[0] - window.shape[0] + 1;
#range2 = src.shape[1] - window.shape[1] + 1;
range2 = src.shape[1] - window.shape[0] + 1;
if range2 < 0:
range2 = 1;
res = np.zeros((range1, range2), dtype=np.float64);
for i in range(range1):
for j in range(range2):
#common.DebugPrint("filter2(): j = %d" % j)
#res[i, j] = np.sum(np.multiply(x[i: 11 + i, j: 11 + j], window))
res[i, j] = np.sum( \
np.multiply(src[i: window.shape[0] + i, j: window.shape[0] + j], window));
# From https://codereview.stackexchange.com/questions/31089/optimizing-numpy-code - optimized version using as_strided and sum instead of nested loops
if False:
# Gives exception: "ValueError: negative dimensions are not allowed"
x1 = np.lib.stride_tricks.as_strided(x, \
((src.shape[0] - 10) / 1, (src.shape[1] - 10) / 1, 11, 11), \
(src.strides[0] * 1, src.strides[1] * 1, \
src.strides[0], src.strides[1])) * window;
res = x1.sum((2, 3));
return res;
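# Illustrative sketch (not part of the original source): the disabled
# reference implementation above spells out what the cv2.filter2D call is
# meant to compute, i.e. a 1-D correlation of `window` along one axis of
# `src` with zero padding at the borders:
#
#   dst[x, y] = sum over xp of  window[xp] * src[x + xp - 1, y]
#
# The transposes around cv2.filter2D reconcile OpenCV's row-major layout
# with the MATLAB column-major convention described in the comment above.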
def gradient(img, spaceX=1, spaceY=1, spaceZ=1):
assert (img.ndim == 2) or (img.ndim == 3);
#if img.ndim == 3:
# assert spaceZ != None
"""
From Matlab help:
Description:
    FX = gradient(F), where F is a vector, returns the one-dimensional
    numerical gradient of F. Here FX corresponds to dF/dx, the differences
    in the x (horizontal) direction.
    [FX, FY] = gradient(F), where F is a matrix, returns the x and y
    components of the two-dimensional numerical gradient. FX corresponds to
    dF/dx, the differences in the x (horizontal) direction. FY corresponds
    to dF/dy, the differences in the y (vertical) direction. The spacing
    between points in each direction is assumed to be one.
    [FX, FY, FZ, ...] = gradient(F), where F has N dimensions, returns the
    N components of the gradient of F.
    There are two ways to control the spacing between values in F:
      - A single spacing value, h, specifies the spacing between points in
        every direction.
      - N spacing values (h1, h2, ...) specify the spacing for each
        dimension of F. Scalar spacing parameters specify a constant spacing
        for each dimension. Vector parameters specify the coordinates of the
        values along corresponding dimensions of F. In this case, the length
        of the vector must match the size of the corresponding dimension.
Note:
    The first output FX is always the gradient along the 2nd dimension of F,
    going across columns. The second output FY is always the gradient along
    the 1st dimension of F, going across rows. For the third output FZ and
    the outputs that follow, the Nth output is the gradient along the Nth
    dimension of F.
a `time` attribute. They do not need to be sorted. This
parameter is needed to numerically estimate the time derivative of
the `c` coefficient of the time evolution vectors (used only in
option 4, see below).
@param steps
How many curves in the future and past of this curve to consider
from `curves`. Default is `3`.
@param option
For debugging/exploration purposes. Use different definitions of
\f$\tau\f$ to experiment with. The options are (**note** that for
option 2 we return \f$\tau\f$ instead of \f$\tau^2\f$):
* option 0: default, the one given in the above equation
* option 1: use \f$1/\tau^2 = \frac{1}{A} \int_{\mathcal{S}} \Theta_{(V)}^2\ dA\f$
* option 2: use \f$1/\tau = \frac{1}{A} \int_{\mathcal{S}} \Theta_{(V)}\ dA\f$
* option 3: use \f$1/\tau^2 = \frac{1}{A} \int_{\mathcal{S}} [\kappa^{(V)} \Theta_{(V)} - \frac12 \Theta_{(V)}^2] \ dA\f$
* option 4: use \f$1/\tau^2 = \frac{1}{A} \int_{\mathcal{S}} [\kappa^{(V)} \Theta_{(V)} - \frac12 \Theta_{(V)}^2 - \Theta_{(V)} \frac{d}{dt} \ln c] \ dA\f$
"""
if len(kappas_tev) != len(proper_pts):
raise ValueError("Surface gravity sampled on incorrect number of points.")
n = len(kappas_tev)
kappas_tev = np.asarray(kappas_tev)
with self.fix_evaluator():
area_element = self.get_area_integrand()
length_map = self.cached_length_maps()[0]
def _integrand(xs):
proper_params = [length_map(param) for param in xs]
cmp_opts = dict(rtol=1e-10, atol=0.0)
if (not np.allclose(proper_params, proper_pts, **cmp_opts) or
not np.allclose(xs, [tev.param for tev in tevs], **cmp_opts)):
print("WARNING: Objects not sampled at quadrature points. "
"Integral value might be incorrect.")
measure = np.asarray([area_element(x) for x in xs])
area = 2*np.pi * _fixed_quad(lambda _: measure, a=0.0, b=np.pi, n=n)
ingoing_exp = np.asarray(self.expansions(xs, ingoing=True))
ingoing_exp = ingoing_exp / np.sqrt(2) # convert to l*k = -1
c = np.asarray([tev.c_B for tev in tevs])
theta_V = c * ingoing_exp
if option == 0:
integrand = kappas_tev * theta_V
elif option == 1:
integrand = theta_V**2
elif option == 2:
integrand = theta_V # The result will be tau not tau^2 here!
elif option == 3:
integrand = kappas_tev * theta_V - 0.5 * theta_V**2
elif option == 4:
dt_ln_c = self._compute_dtau_c(curves=curves, steps=steps, tevs=tevs) / c
integrand = (
kappas_tev * theta_V - 0.5 * theta_V**2
- theta_V * dt_ln_c
)
return 1/area * integrand * measure
integral = 2*np.pi * _fixed_quad(_integrand, a=0.0, b=np.pi, n=n)
return 1/integral
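# Hedged sketch (added comment; assumes `_fixed_quad` is a fixed-order
# Gauss-Legendre rule in the spirit of scipy.integrate.fixed_quad). The area
# computed above is the axisymmetric surface integral
#
#     A = 2*pi * \int_0^pi (area element)(theta) dtheta ,
#
# where the 2*pi comes from the trivial phi integration. In plain scipy this
# would read roughly:
#
#     from scipy.integrate import fixed_quad
#     theta_part, _ = fixed_quad(
#         lambda xs: np.asarray([area_element(x) for x in xs]), 0.0, np.pi, n=n)
#     area = 2*np.pi * theta_part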
def _compute_dtau_c(self, curves, steps, tevs):
curves = self.collect_close_in_time(curves, steps=steps)
t0 = self.metric.time
lm = self.cached_length_maps()[0]
pts = [lm(v.param) for v in tevs]
epsilons = [v.eps for v in tevs]
def _c(curve):
r"""Return shape=(len(pts),) array with `c` parameters at each point."""
c_tevs = curve.user_data["tev_divergence"]["vectors"]
c_c_interp = CubicSpline(
[v.param for v in c_tevs], [v.c_B for v in c_tevs],
)
c_im = curve.cached_length_maps()[1]
t = curve.metric.time
results = []
for proper_param, eps in zip(pts, epsilons):
param = c_im(clip(proper_param + eps*(t-t0), 0.0, np.pi))
results.append(c_c_interp(param))
return np.asarray(results)
# shape=(len(curves), len(pts))
c_along_tube = np.asarray([_c(curve) for curve in curves])
dtau_c = []
for i, (proper_param, vec) in enumerate(zip(pts, tevs)):
tau = vec.tau
c_interp = CubicSpline(
[c.metric.time for c in curves],
[c_along_tube[j,i] for j in range(len(curves))],
)
# \partial_\tau c
dtau_c.append(c_interp(t0, 1))
dtau_c = np.asarray(dtau_c)
return dtau_c
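# Added note: scipy's CubicSpline evaluates derivatives through its second
# positional argument (`nu`), so `c_interp(t0, 1)` above is d c / d t at t0,
# estimated from the values of c along the tube. Minimal illustration:
#
#     from scipy.interpolate import CubicSpline
#     cs = CubicSpline([0.0, 1.0, 2.0], [0.0, 1.0, 4.0])  # samples of y = t**2
#     cs(1.0, 1)  # ~ 2.0, the first derivative dy/dt at t = 1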
def timescale_T2(self, tevs):
r"""Compute the square of the timescale `T`.
This computes
\f[
T^2 := \left(
\frac{1}{A} \int_{\mathcal{S}} \sigma^{(V)}_{AB}\sigma^{(\tau)\,AB}\ dA
\right)^{-1} \,,
\f]
where `A` is the MOTS's surface area, `V` is the time evolution vector
(scaled to have unity time component) with
\f[
V^\mu = b \ell^\mu + c k^\mu \,.
\f]
Then, \f$\tau^\mu = b \ell^\mu - c k^\mu\f$. \f$\ell\f$ and \f$k\f$
are the outgoing and ingoing null normals with scaling
\f$\ell \cdot k = -1\f$.
@param tevs
List of `n` time evolution vector objects at the Gaussian
quadrature points for an integral of order `n`.
"""
n = len(tevs)
with self.fix_evaluator():
area_element = self.get_area_integrand()
length_map = self.cached_length_maps()[0]
def integrand(xs):
proper_params = [length_map(param) for param in xs]
cmp_opts = dict(rtol=1e-10, atol=0.0)
if not np.allclose(xs, [tev.param for tev in tevs], **cmp_opts):
print("WARNING: Objects not sampled at quadrature points. "
"Integral value might be incorrect.")
sigma_V_sigma_tau = []
for param, tev in zip(xs, tevs):
stab_calc = self.get_stability_calc_obj(param)
sigma_ell2 = stab_calc.compute_shear_squared()
sigma_k2 = stab_calc.compute_shear_k(full_output=True)[1]
sigma_V_sigma_tau.append(
tev.b_B**2 * sigma_ell2 - tev.c_B**2 * sigma_k2
)
sigma_V_sigma_tau = np.asarray(sigma_V_sigma_tau)
measure = np.asarray([area_element(x) for x in xs])
area = 2*np.pi * _fixed_quad(lambda _: measure, a=0.0, b=np.pi, n=n)
return 1/area * sigma_V_sigma_tau * measure
integral = 2*np.pi * _fixed_quad(integrand, a=0.0, b=np.pi, n=n)
return 1/integral
def xi_vector(self, pts, curves, steps=3, r_hat=True, full_output=False,
get_tevs=False, tevs=None):
r"""Compute the xi vector.
The quantity computed here is called \f$\zeta^A\f$ in eq. (3.10) in
Ref. [1], see also eq. (3.19) in Ref. [2]. We compute
\f[
\xi^A := q^{AB} \hat r^\mu \nabla_\mu \ell_B \,,
\f]
where \f$\hat r^\mu\f$ is the normalized time evolution vector (see
time_evolution_vector()), \f$q^{AB}\f$ the inverse 2-metric on the
MOTS, and \f$\ell^\mu\f$ the future pointing outgoing null normal to
the MOTS. Note that we use the scaling \f$k^\mu \ell_\mu = -1\f$ here,
where \f$k^\mu\f$ is the future pointing ingoing null normal.
**Note:** This quantity depends on the scaling of the null normals. We
construct the null normals in a slicing dependent way using
\f[
\ell^\mu = \frac{1}{\sqrt{2}} (n^\mu + v^\mu) \,,
\qquad
k^\mu = \frac{1}{\sqrt{2}} (n^\mu - v^\mu) \,,
\f]
where \f$n^\mu\f$ is the future pointing normal on the slice and
\f$v^\mu\f$ the outward normal of the MOTS within the slice. This
choice leads to \f$\ell^\mu k_\mu = -1\f$. However, you can scale both
null normals by an arbitrary function in a way to keep the condition
\f$\ell^\mu k_\mu = -1\f$. In Ref. [2], a different choice is made,
namely
\f[
\ell^\mu = \hat\tau^\mu + \hat r^\mu \,,
\f]
where \f$\hat\tau^\mu\f$ is the normalized outward normal to the MOTT
(i.e. a timelike vector for spacelike surfaces like dynamical
horizons) and \f$\hat r^\mu\f$ the time evolution vector, i.e. the
tangent to the MOTT which is also a normal to the MOTS. Hence,
quantitative results presented in [2] will not hold for the vector
computed here.
@return Components of \f$\xi^A\f$ as a 2-element array. If
``full_output=True``, return \f$\xi^A\f$, \f$\xi_A\f$,
\f$|\xi|^2\f$, \f$\xi_{(\ell)}\f$, where
\f$\xi_{(\ell)} := \overline{m}^A \xi_A\f$. Here
\f$\overline{m}^\mu\f$ is one of the components of the complex
null tetrad \f$(\ell,k,m,\overline{m})\f$.
@param pts
Proper length parameters at which to compute the results. Each
point should be in the range ``(0, pi)``, where a value of e.g.
``pi/2`` refers to a point on the MOTS dividing the curve into two
parts of equal proper length.
@param curves
List of curves that build up the tube. The current object (`self`)
may or may not be in that list. Each curve needs to have a metric
with a `time` attribute. They do not need to be sorted. This
parameter is needed to numerically estimate the time derivative of
the normal to the MOTS in the slice.
@param steps
How many curves in the future and past of this curve to consider
from `curves`. Default is `3`.
@param r_hat
Whether to compute \f$\xi^A\f$ with the normalized vector
\f$\hat r^\mu := V^\mu/|V|\f$ or with the slicing-adapted evolution
vector \f$\mathcal{V}^\mu\f$ (see .curve.expcurve.TimeVectorData()
for the difference). Default is `True`, i.e. to use the normalized
vector.
@param full_output
Whether to return just the contravariant components of the xi
vector or also the covariant, the square, and the complex scalar.
See above for details. Default is `False`.
@param get_tevs
Add the time evolution vectors at the requested points as list to
the output.
@param tevs
Optional sequence of evolution vectors corresponding to the points
given in `pts`. If not specified, will compute them.
@b References
[1] Ashtekar, Abhay, <NAME>, and <NAME>. "Dynamical black holes:
Approach to the final state." Physical Review D 88.6 (2013): 064045.
[2] Ashtekar, Abhay, and <NAME>. "Dynamical horizons and their
properties." Physical Review D 68.10 (2003): 104030.
"""
with self.fix_evaluator():
return self._xi_vector(pts, curves, steps, r_hat, full_output,
get_tevs, tevs)
def _xi_vector(self, pts, curves, steps, r_hat, full_output, get_tevs,
tevs):
r"""Implements xi_vector()."""
dt_normals, tevs = self.dt_normals(
pts, curves, steps=steps, use_tev=True, full_output=True,
tevs=tevs,
)
xi_A_up = []
xi_A = []
xi2 = []
xi_scalar = | |
_validation = {
'command_line': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'command_line': {'key': 'commandLine', 'type': 'str'},
'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'},
'resource_files': {'key': 'resourceFiles', 'type': '[ExtendedResourceFile]'},
'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
'constraints': {'key': 'constraints', 'type': 'TaskConstraints'},
'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'},
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
'rerun_on_node_reboot_after_success': {'key': 'rerunOnNodeRebootAfterSuccess', 'type': 'bool'},
}
def __init__(self, **kwargs):
super(JobPreparationTask, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.command_line = kwargs.get('command_line', None)
self.container_settings = kwargs.get('container_settings', None)
self.resource_files = kwargs.get('resource_files', None)
self.environment_settings = kwargs.get('environment_settings', None)
self.constraints = kwargs.get('constraints', None)
self.wait_for_success = kwargs.get('wait_for_success', None)
self.user_identity = kwargs.get('user_identity', None)
self.rerun_on_node_reboot_after_success = kwargs.get('rerun_on_node_reboot_after_success', None)
class JobReleaseTask(Model):
"""A Job Release task to run on job completion on any compute node where the
job has run.
The Job Release task runs when the job ends, because of one of the
following: the user calls the Terminate Job API or the Delete Job API
while the job is still active; the job's maximum wall clock time constraint
is reached while the job is still active; or the job's Job Manager task
completed and the job is configured to terminate when the Job Manager
completes. The Job Release task runs on each compute node where tasks of
the job have run and the Job Preparation task ran and completed. If you
reimage a compute node after it has run the Job Preparation task, and the
job ends without any further tasks of the job running on that compute node
(and hence the Job Preparation task does not re-run), then the Job Release
task does not run on that node. If a compute node reboots while the Job
Release task is still running, the Job Release task runs again when the
compute node starts up. The job is not marked as complete until all Job
Release tasks have completed. The Job Release task runs in the background.
It does not occupy a scheduling slot; that is, it does not count towards
the maxTasksPerNode limit specified on the pool.
:param id: A string that uniquely identifies the Job Release task within
the job. The ID can contain any combination of alphanumeric characters
including hyphens and underscores and cannot contain more than 64
characters. If you do not specify this property, the Batch service assigns
a default value of 'jobrelease'. No other task in the job can have the
same ID as the Job Release task. If you try to submit a task with the same
ID, the Batch service rejects the request with error code
TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the
HTTP status code is 409 (Conflict).
:type id: str
:param command_line: The command line of the Job Release task. The command
line does not run under a shell, and therefore cannot take advantage of
shell features such as environment variable expansion. If you want to take
advantage of such features, you should invoke the shell in the command
line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c
MyCommand" in Linux.
:type command_line: str
:param container_settings: The settings for the container under which the
Job Release task runs. When this is specified, all directories recursively
below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on
the node) are mapped into the container, all task environment variables
are mapped into the container, and the task command line is executed in
the container.
:type container_settings: :class:`TaskContainerSettings
<azure.batch.models.TaskContainerSettings>`
:param resource_files: A list of files that the Batch service will
download to the compute node before running the command line. Files listed
under this element are located in the task's working directory.
:type resource_files: list of :class:`ExtendedResourceFile
<azext.batch.models.ExtendedResourceFile>`
:param environment_settings: A list of environment variable settings for
the Job Release task.
:type environment_settings: list of :class:`EnvironmentSetting
<azure.batch.models.EnvironmentSetting>`
:param max_wall_clock_time: The maximum elapsed time that the Job Release
task may run on a given compute node, measured from the time the task
starts. If the task does not complete within the time limit, the Batch
service terminates it. The default value is 15 minutes. You may not
specify a timeout longer than 15 minutes. If you do, the Batch service
rejects it with an error; if you are calling the REST API directly, the
HTTP status code is 400 (Bad Request).
:type max_wall_clock_time: timedelta
:param retention_time: The minimum time to retain the task directory for
the Job Release task on the compute node. After this time, the Batch
service may delete the task directory and all its contents. The default is
infinite, i.e. the task directory will be retained until the compute node
is removed or reimaged.
:type retention_time: timedelta
:param user_identity: The user identity under which the Job Release task
runs. If omitted, the task runs as a non-administrative user unique to the
task.
:type user_identity: :class:`UserIdentity
<azure.batch.models.UserIdentity>`
"""
_validation = {
'command_line': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'command_line': {'key': 'commandLine', 'type': 'str'},
'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'},
'resource_files': {'key': 'resourceFiles', 'type': '[ExtendedResourceFile]'},
'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'},
'retention_time': {'key': 'retentionTime', 'type': 'duration'},
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
}
def __init__(self, **kwargs):
super(JobReleaseTask, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.command_line = kwargs.get('command_line', None)
self.container_settings = kwargs.get('container_settings', None)
self.resource_files = kwargs.get('resource_files', None)
self.environment_settings = kwargs.get('environment_settings', None)
self.max_wall_clock_time = kwargs.get('max_wall_clock_time', None)
self.retention_time = kwargs.get('retention_time', None)
self.user_identity = kwargs.get('user_identity', None)
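# Illustrative usage sketch (added; not part of the SDK source). Field names
# follow the attribute map above, the concrete values are made up:
#
#   from datetime import timedelta
#   release = JobReleaseTask(
#       command_line="/bin/sh -c 'rm -rf $AZ_BATCH_TASK_WORKING_DIR/tmp'",
#       max_wall_clock_time=timedelta(minutes=10),
#       retention_time=timedelta(hours=1),
#   )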
class JobTemplate(Model):
"""A Job Template.
:ivar type: The type of object described by the template. Must be:
"Microsoft.Batch/batchAccounts/jobs"
:type type: str
:param api_version: The API version that the template conforms to.
:type api_version: str
:param properties: The specification of the job.
:type properties: :class:`ExtendedJobParameter<azext.batch.models.ExtendedJobParameter>`
"""
_validation = {
'type': {'required': True, 'constant': True},
'properties': {'required': True},
}
_attribute_map = {
'type': {'key': 'id', 'type': 'str'},
'api_version': {'key': 'apiVersion', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'ExtendedJobParameter'},
}
type = "Microsoft.Batch/batchAccounts/jobs"
def __init__(self, **kwargs):
super(JobTemplate, self).__init__(**kwargs)
self.properties = kwargs.get('properties')
self.api_version = kwargs.get('api_version', None)
class MergeTask(Model):
"""An Azure Batch task template to repeat.
:param str id: The ID of the merge task.
:param display_name: A display name for the task. The display name need
not be unique and can contain any Unicode characters up to a maximum
length of 1024.
:type display_name: str
:param command_line: The command line of the task. For multi-instance
tasks, the command line is executed as the primary task, after the primary
task and all subtasks have finished executing the coordination command
line. The command line does not run under a shell, and therefore cannot
take advantage of shell features such as environment variable expansion.
If you want to take advantage of such features, you should invoke the
shell in the command line, for example using "cmd /c MyCommand" in Windows
or "/bin/sh -c MyCommand" in Linux.
:type command_line: str
:param exit_conditions: How the Batch service should respond when the task
completes.
:type exit_conditions: :class:`ExitConditions
<azure.batch.models.ExitConditions>`
:param resource_files: A list of files that the Batch service will
download to the compute node before running the command line. For
multi-instance tasks, the resource files will only be downloaded to the
compute node on which the primary task is executed.
:type resource_files: list of :class:`ExtendedResourceFile
<azext.batch.models.ExtendedResourceFile>`
:param environment_settings: A list of environment variable settings for
the task.
:type environment_settings: list of :class:`EnvironmentSetting
<azure.batch.models.EnvironmentSetting>`
:param affinity_info: A locality hint that can be used by the Batch
service to select a compute node on which to start the new task.
:type affinity_info: :class:`AffinityInformation
<azure.batch.models.AffinityInformation>`
:param constraints: The execution constraints that apply to this task. If
you do not specify constraints, the maxTaskRetryCount is the
maxTaskRetryCount specified for the job, and the maxWallClockTime and
retentionTime are infinite.
:type constraints: :class:`TaskConstraints
<azure.batch.models.TaskConstraints>`
:param user_identity: The user identity under which the task runs. If
omitted, the task runs as a non-administrative user unique to the task.
:type user_identity: :class:`UserIdentity
<azure.batch.models.UserIdentity>`
:param depends_on: | |
mv(ow,12)
yield from align.m1pit
yield from m3_check()
#FIX E-SCANS
yield from mv(pgm.en,EA)
yield from sleep(5)
yield from scan([sclr,qem12],cryo.t,6,23,ow,12,46,86, md = {'reason':'Reflectivity @ 526.3 eV'})
yield from mv(pgm.en,EB)
yield from sleep(5)
yield from scan([sclr,qem12],cryo.t,23,6,ow,46,12,86, md = {'reason':'Reflectivity @ 527.75 eV'})
yield from mv(pgm.en,Ebg)
yield from sleep(5)
yield from scan([sclr,qem12],cryo.t,6,23,ow,12,46,86, md = {'reason':'Reflectivity @ 524.5 eV'})
#FIX Q-SCANS
yield from mv(cryo.t,14)
yield from mv(ow,28)
yield from scan([sclr,qem12],pgm.en,520,540,201,md={'reason':'Fix-q on LSCuO at 20 K'})
def LNO_test_high_res():
#dets = [rixscam, sclr, ring_curr]
rixscam_exp = 30
m7_pit_vals = None
yield from mv(rixscam.cam.acquire_time, rixscam_exp)
yield from mv(sclr.preset_time, rixscam_exp)
yield from mv(extslt.vg,20, extslt.hg, 150)
yield from pol_H(-2.6)
E = list(np.arange(853,853.85))
yield from sleep(30)
yield from rixscam_acquire_w_shutter(E,m7_pit_vals, 240, 'LNO 20um' )
yield from mv(extslt.vg,10, extslt.hg, 150)
yield from pol_H(-2.6)
E = list(np.arange(853,853.85))
yield from sleep(30)
yield from rixscam_acquire_w_shutter(E,m7_pit_vals, 600, 'LNO 10um' )
yield from mv(rixscam.cam.acquire_time, 5)
yield from mv(sclr.preset_time, 0.1)
yield from count([rixscam])
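# Hedged usage note (added): the functions above are bluesky plans (generators)
# and do nothing when called directly; at the beamline they are executed by the
# RunEngine, which in the IPython profile is normally predefined as `RE`, e.g.
#
#   RE(LNO_test_high_res())
#
# or, in a standalone script (sketch):
#
#   from bluesky import RunEngine
#   RE = RunEngine({})
#   RE(LNO_test_high_res())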
def ene_calib_ctape():
#dets = [rixscam, sclr, ring_curr]
rixscam_exp = 15
m7_pit_vals = None
yield from mv(rixscam.cam.acquire_time, rixscam_exp)
yield from mv(sclr.preset_time, rixscam_exp)
yield from mv(extslt.vg,10, extslt.hg, 150)
yield from pol_V(3)
E = [851,851.5,852,852.5,853,853.5,854]
yield from sleep(10)
yield from rixscam_acquire_w_shutter(E,m7_pit_vals, 80, 'elastic 10um' )
yield from mv(extslt.vg,20, extslt.hg, 150)
rixscam_exp = 10
yield from mv(rixscam.cam.acquire_time, rixscam_exp)
E = [853]
yield from rixscam_acquire_w_shutter(E,m7_pit_vals, 50, 'elastic 20um' )
yield from mv(extslt.vg,10, extslt.hg, 150)
rixscam_exp = 8
yield from mv(rixscam.cam.acquire_time, rixscam_exp)
yield from mv(m4slt.vg,2.2)
E = [853]
yield from rixscam_acquire_w_shutter(E,m7_pit_vals, 50, 'elastic 10um open M4slt' )
yield from mv(m4slt.vg,1.2)
yield from mv(rixscam.cam.acquire_time, 60)
yield from mv(sclr.preset_time, 0.1)
yield from count([rixscam])
def cff_test_high_res():
#dets = [rixscam, sclr, ring_curr]
rixscam_exp = 30
m7_pit_vals = None
yield from mv(rixscam.cam.acquire_time, rixscam_exp)
yield from mv(sclr.preset_time, rixscam_exp)
yield from mv(extslt.vg,10, extslt.hg, 150)
yield from pol_H(-2.6)
cff_list=[4.6,4.65,4.7,4.75]
for cff in cff_list:
yield from mv(pgm.cff,cff)
yield from sleep(10)
yield from rixscam_acquire_w_shutter([851],m7_pit_vals, 20, 'elastic 10 um' )
def cff_test_medium_res():
#dets = [rixscam, sclr, ring_curr]
rixscam_exp = 180
yield from mv(rixscam.cam.acquire_time, rixscam_exp)
yield from mv(sclr.preset_time, rixscam_exp)
yield from mv(extslt.vg,20, extslt.hg, 150)
yield from rixscam_cff_optimization(extra_md = '500 BL 2500 SP' )
yield from sleep(7200)
yield from rixscam_cff_optimization(extra_md = '500 BL 2500 SP' )
yield from mv(shutterb,'close')
yield from mv(rixscam.cam.acquire_time, 30)
yield from mv(sclr.preset_time, 0.1)
yield from count([rixscam])
def m7_gr_optim():
rixscam_exp = 180
yield from mv(rixscam.cam.acquire_time, rixscam_exp)
yield from mv(sclr.preset_time, rixscam_exp)
yield from mv(extslt.vg,20, extslt.hg, 150)
yield from rixscam_m7_gr_2_axis(extra_md = '500 BL 2500 SP extslt.vg=20' )
rixscam_exp = 300
yield from mv(rixscam.cam.acquire_time, rixscam_exp)
yield from mv(sclr.preset_time, rixscam_exp)
yield from mv(extslt.vg,10, extslt.hg, 150)
yield from rixscam_m7_gr_2_axis(extra_md = '500 BL 2500 SP extslt.vg=10' )
yield from mv(shutterb,'close')
yield from mv(rixscam.cam.acquire_time, 30)
yield from mv(sclr.preset_time, 0.1)
yield from count([rixscam])
def LNO_test():
rixscam_exp = 300
yield from mv(rixscam.cam.acquire_time, rixscam_exp)
yield from mv(sclr.preset_time, rixscam_exp)
E = [853.5,854.5]
m7_pit_vals = None
yield from rixscam_acquire_w_shutter(E,m7_pit_vals, 4, 'LNO 20 um' )
def plan_coso_300(cts):
dets = [rixscam, ring_curr]
rixscam_exp = 2
yield from mv(rixscam.cam.acquire_time, rixscam_exp)
yield from mv(extslt.vg,10, extslt.hg, 150)
yield from pol_H(-8)
#Cu2OSeO3- Cu-L3
E1 = list(np.arange(927,931.09,0.1))
E2 = list(np.arange(931.2,935.09,0.2))
for i in E1+E2:
yield from mv(pgm.en,i)
yield from sleep(2)
yield from count(dets, num=cts, md = {'reason':'Emap COSO 300K'} )
def plan_coso_45(cts):
dets = [rixscam, ring_curr]
rixscam_exp = 5
yield from mv(rixscam.cam.acquire_time, rixscam_exp)
yield from mv(extslt.vg,10, extslt.hg, 150)
yield from pol_H(-8)
#Cu2OSeO3- Cu-L3
E1 = list(np.arange(933.8,935.09,0.2))
for i in E1:
yield from mv(pgm.en,i)
yield from sleep(2)
yield from count(dets, num=cts, md = {'reason':'Emap COSO 45K'} )
def plan_Te_coso_45():
dets = [rixscam, ring_curr]
yield from mv(extslt.vg,10, extslt.hg, 150)
yield from pol_H(-8)
#Cu2OSeO3- Cu-L3
rixscam_exp = 4
cts=60
yield from mv(rixscam.cam.acquire_time, rixscam_exp)
E1 = list(np.arange(927,927.89,0.2))
for i in E1:
yield from mv(pgm.en,i)
yield from sleep(2)
yield from count(dets, num=cts, md = {'reason':'Emap COSO 45K'} )
rixscam_exp = 2
cts=120
yield from mv(rixscam.cam.acquire_time, rixscam_exp)
E1 = list(np.arange(928,928.89,0.2))
for i in E1:
yield from mv(pgm.en,i)
yield from sleep(2)
yield from count(dets, num=cts, md = {'reason':'Emap COSO 45K'} )
rixscam_exp = 1
cts=240
yield from mv(rixscam.cam.acquire_time, rixscam_exp)
E1 = list(np.arange(929,930.29,0.2))
for i in E1:
yield from mv(pgm.en,i)
yield from sleep(2)
yield from count(dets, num=cts, md = {'reason':'Emap COSO 45K'} )
rixscam_exp = 2
cts=120
yield from mv(rixscam.cam.acquire_time, rixscam_exp)
E1 = list(np.arange(930.4,930.89,0.2))
for i in E1:
yield from mv(pgm.en,i)
yield from sleep(2)
yield from count(dets, num=cts, md = {'reason':'Emap COSO 45K'} )
rixscam_exp = 5
cts=48
yield from mv(rixscam.cam.acquire_time, rixscam_exp)
E1 = list(np.arange(931,935.09,0.2))
for i in E1:
yield from mv(pgm.en,i)
yield from sleep(2)
yield from count(dets, num=cts, md = {'reason':'Emap COSO 45K'} )
def plan_coso_45_high_res():
dets = [rixscam, ring_curr]
yield from mv(extslt.vg,15, extslt.hg, 150)
yield from pol_H(-8)
#Cu2OSeO3- Cu-L3
#rixscam_exp = 6
#cts=100
#yield from mv(rixscam.cam.acquire_time, rixscam_exp)
#yield from mv(pgm.en,929.3)
#for i in range(0,5):
# yield from count(dets, num=cts, md = {'reason':'COSO 45K E1'} )
#rixscam_exp = 5
#cts=120
#yield from mv(rixscam.cam.acquire_time, rixscam_exp)
#yield from mv(pgm.en,929.9)
#for i in range(0,5):
# yield from count(dets, num=cts, md = {'reason':'COSO 45K E2'} )
rixscam_exp = 5
cts=120
yield from mv(rixscam.cam.acquire_time, rixscam_exp)
yield from mv(pgm.en,930.6)
for i in range(0,1):
yield from count(dets, num=cts, md = {'reason':'COSO 45K E3'} )
rixscam_exp = 6
cts=100
yield from mv(rixscam.cam.acquire_time, rixscam_exp)
yield from mv(pgm.en,931.1)
for i in range(0,5):
yield from count(dets, num=cts, md = {'reason':'COSO 45K E4'} )
rixscam_exp = 5
cts=120
yield from mv(rixscam.cam.acquire_time, rixscam_exp)
yield from mv(pgm.en,930.2)
for i in range(0,5):
yield from count(dets, num=cts, md = {'reason':'COSO 45K E5'} )
yield from pol_V(-1.1)
rixscam_exp = 5
cts=120
yield from mv(rixscam.cam.acquire_time, rixscam_exp)
yield from mv(pgm.en,930.2)
for i in range(0,5):
yield from count(dets, num=cts, md = {'reason':'COSO 45K E6'} )
yield from pol_H(-8)
rixscam_exp = 6
cts=100
yield from mv(rixscam.cam.acquire_time, rixscam_exp)
yield from mv(pgm.en,929.6)
for i in range(0,5):
yield from count(dets, num=cts, md = {'reason':'COSO 45K E6'} )
yield from mv(gvbt1,'close')
def plan_fe30uc_emap():
dets = [rixscam, ring_curr]
yield from mv(extslt.vg,20, extslt.hg, 150)
#yield from pol_H(-2)
#Fe_L
E1 = list(np.arange(705.4,706.49,0.2)) #8s
E2 = list(np.arange(706.6,707.69,0.2)) #6s
E3 = list(np.arange(707.8,708.09,0.2)) #8s
E4 = list(np.arange(708.5,712.09,0.5)) #8s
rixscam_exp = 8
yield from mv(rixscam.cam.acquire_time, rixscam_exp)
cts=40
for i in E1:
yield from mv(pgm.en,i)
yield from sleep(2)
yield from count(dets, num=cts, md = {'reason':'Emap Fe 30uc'} )
rixscam_exp = 6
yield from mv(rixscam.cam.acquire_time, rixscam_exp)
cts=53
for i in E2:
yield from mv(pgm.en,i)
yield from sleep(2)
yield from count(dets, num=cts, md = {'reason':'Emap Fe 30uc'} )
rixscam_exp = 8
yield from mv(rixscam.cam.acquire_time, rixscam_exp)
cts=40
for i in E3+E4:
yield from mv(pgm.en,i)
yield from sleep(2)
yield from count(dets, num=cts, md = {'reason':'Emap Fe 30uc'} )
yield from mv(gvbt1,'close')
def plan_fe30uc_ecut():
dets = [rixscam, ring_curr]
yield from mv(extslt.vg,11, extslt.hg, 150)
#yield from pol_H(-2)
rixscam_exp = 5
cts=120
yield from mv(rixscam.cam.acquire_time, rixscam_exp)
yield from mv(pgm.en,706.8)
for i in range(0,6):
yield from count(dets, num=cts, md = {'reason':'Fe30uc 706.8eV 130deg'} )
def plan_fe3uc_6uc_ecut():
dets = [rixscam, ring_curr]
yield from mv(extslt.vg,15, extslt.hg, 150)
#yield from pol_H(-2)
rixscam_exp = 5
cts=120
# Fe 3uc
#yield from mv(cryo.y,28.65)
#yield from mv(cryo.x,26.335)
#yield from mv(cryo.z,15.295)
#yield from mv(rixscam.cam.acquire_time, rixscam_exp)
#yield from mv(pgm.en,706.8)
#yield from sleep(5)
#for i in range(0,15):
#yield from count(dets, num=cts, md = {'reason':'Fe3uc 706.8eV 130deg'} )
# Fe 6uc
#yield from mv(cryo.y,24.8)
#yield from mv(cryo.x,26.335)
#yield from mv(cryo.z,15.365)
#yield from mv(pgm.en,706.8)
#yield from sleep(5)
for i in range(0,18):
yield from count(dets, num=cts, md = {'reason':'Fe6uc 706.8eV 130deg'} )
yield from mv(gvbt1,'close')
yield from mv(gvsc1,'close')
yield from mv(shutterb,'close')
def plan_fe3uc_ecut():
dets = [rixscam, ring_curr]
yield from mv(extslt.vg,15, extslt.hg, 150)
#yield from pol_H(-2)
rixscam_exp = 5
cts=120
# Fe 3uc
#yield from mv(cryo.y,28.65)
#yield from mv(cryo.x,26.31)
#yield from mv(cryo.z,15.835)
yield from mv(rixscam.cam.acquire_time, rixscam_exp)
yield from mv(pgm.en,706.8)
yield from sleep(5)
for i in range(0,24):
yield from count(dets, num=cts, md = {'reason':'Fe3uc 706.8eV'} )
yield from mv(gvbt1,'close')
yield from | |
# coding: utf-8
import sys
from urllib.error import HTTPError
from python_environment_check import check_packages
import numpy as np
import os
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
# # Machine Learning with PyTorch and Scikit-Learn
# # -- Code Examples
# ## Package version checks
# Add folder to path in order to load from the check_packages.py script:
sys.path.insert(0, '..')
# Check recommended package versions:
d = {
'numpy': '1.21.2',
'matplotlib': '3.4.3',
'pandas': '1.3.2'
}
check_packages(d)
# # Chapter 2 - Training Machine Learning Algorithms for Classification
# ### Overview
#
# - [Artificial neurons – a brief glimpse into the early history of machine learning](#Artificial-neurons-a-brief-glimpse-into-the-early-history-of-machine-learning)
# - [The formal definition of an artificial neuron](#The-formal-definition-of-an-artificial-neuron)
# - [The perceptron learning rule](#The-perceptron-learning-rule)
# - [Implementing a perceptron learning algorithm in Python](#Implementing-a-perceptron-learning-algorithm-in-Python)
# - [An object-oriented perceptron API](#An-object-oriented-perceptron-API)
# - [Training a perceptron model on the Iris dataset](#Training-a-perceptron-model-on-the-Iris-dataset)
# - [Adaptive linear neurons and the convergence of learning](#Adaptive-linear-neurons-and-the-convergence-of-learning)
# - [Minimizing cost functions with gradient descent](#Minimizing-cost-functions-with-gradient-descent)
# - [Implementing an Adaptive Linear Neuron in Python](#Implementing-an-Adaptive-Linear-Neuron-in-Python)
# - [Improving gradient descent through feature scaling](#Improving-gradient-descent-through-feature-scaling)
# - [Large scale machine learning and stochastic gradient descent](#Large-scale-machine-learning-and-stochastic-gradient-descent)
# - [Summary](#Summary)
# # Artificial neurons - a brief glimpse into the early history of machine learning
# ## The formal definition of an artificial neuron
# ## The perceptron learning rule
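# (Added note.) The update implemented in Perceptron.fit below is the classic
# perceptron rule: for each training example (x_i, y_i),
#
#   delta = eta * (y_i - y_hat_i)
#   w    := w + delta * x_i
#   b    := b + delta
#
# so the weights and bias only change when the example is misclassified
# (y_i != y_hat_i).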
# # Implementing a perceptron learning algorithm in Python
# ## An object-oriented perceptron API
class Perceptron():
"""Perceptron classifier.
Parameters
------------
eta : float
Learning rate (between 0.0 and 1.0)
n_iter : int
Passes over the training dataset.
random_state : int
Random number generator seed for random weight
initialization.
Attributes
-----------
w_ : 1d-array
Weights after fitting.
b_ : Scalar
Bias unit after fitting.
errors_ : list
Number of misclassifications (updates) in each epoch.
"""
def __init__(self, eta=0.01, n_iter=50, random_state=1):
self.eta = eta
self.n_iter = n_iter
self.random_state = random_state
def fit(self, X, y):
"""Fit training data.
Parameters
----------
X : {array-like}, shape = [n_examples, n_features]
Training vectors, where n_examples is the number of examples and
n_features is the number of features.
y : array-like, shape = [n_examples]
Target values.
Returns
-------
self : object
"""
rgen = np.random.RandomState(self.random_state)
self.w_ = rgen.normal(loc=0.0, scale=0.01, size=X.shape[1])
self.b_ = np.float_(0.)
self.errors_ = []
for _ in range(self.n_iter):
errors = 0
for xi, target in zip(X, y):
update = self.eta * (target - self.predict(xi))
self.w_ += update * xi
self.b_ += update
errors += int(update != 0.0)
self.errors_.append(errors)
return self
def net_input(self, X):
"""Calculate net input"""
return np.dot(X, self.w_) + self.b_
def predict(self, X):
"""Return class label after unit step"""
return np.where(self.net_input(X) >= 0.0, 1, 0)
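# (Added comment.) Quick sanity check of the dot product as an angle measure:
# v2 below is just a scaled copy of v1, so the expression evaluates to
# arccos(1) = 0, i.e. a zero angle between the two vectors.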
v1 = np.array([1, 2, 3])
v2 = 0.5 * v1
np.arccos(v1.dot(v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))
# ## Training a perceptron model on the Iris dataset
# ...
# ### Reading-in the Iris data
try:
s = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
print('From URL:', s)
df = pd.read_csv(s,
header=None,
encoding='utf-8')
except HTTPError:
s = 'iris.data'
print('From local Iris path:', s)
df = pd.read_csv(s,
header=None,
encoding='utf-8')
df.tail()
# ### Plotting the Iris data
# select setosa and versicolor
y = df.iloc[0:100, 4].values
y = np.where(y == 'Iris-setosa', 0, 1)
# extract sepal length and petal length
X = df.iloc[0:100, [0, 2]].values
# plot data
plt.scatter(X[:50, 0], X[:50, 1],
color='red', marker='o', label='Setosa')
plt.scatter(X[50:100, 0], X[50:100, 1],
color='blue', marker='s', label='Versicolor')
plt.xlabel('Sepal length [cm]')
plt.ylabel('Petal length [cm]')
plt.legend(loc='upper left')
# plt.savefig('images/02_06.png', dpi=300)
plt.show()
# ### Training the perceptron model
ppn = Perceptron(eta=0.1, n_iter=10)
ppn.fit(X, y)
plt.plot(range(1, len(ppn.errors_) + 1), ppn.errors_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Number of updates')
# plt.savefig('images/02_07.png', dpi=300)
plt.show()
# ### A function for plotting decision regions
def plot_decision_regions(X, y, classifier, resolution=0.02):
# setup marker generator and color map
markers = ('o', 's', '^', 'v', '<')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
# plot the decision surface
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
np.arange(x2_min, x2_max, resolution))
lab = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
lab = lab.reshape(xx1.shape)
plt.contourf(xx1, xx2, lab, alpha=0.3, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
# plot class examples
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y == cl, 0],
y=X[y == cl, 1],
alpha=0.8,
c=colors[idx],
marker=markers[idx],
label=f'Class {cl}',
edgecolor='black')
plot_decision_regions(X, y, classifier=ppn)
plt.xlabel('Sepal length [cm]')
plt.ylabel('Petal length [cm]')
plt.legend(loc='upper left')
#plt.savefig('images/02_08.png', dpi=300)
plt.show()
# # Adaptive linear neurons and the convergence of learning
# ...
# ## Minimizing cost functions with gradient descent
# ## Implementing an adaptive linear neuron in Python
class AdalineGD():
"""ADAptive LInear NEuron classifier.
Parameters
------------
eta : float
Learning rate (between 0.0 and 1.0)
n_iter : int
Passes over the training dataset.
random_state : int
Random number generator seed for random weight
initialization.
Attributes
-----------
w_ : 1d-array
    Weights after fitting.
b_ : Scalar
    Bias unit after fitting.
losses_ : list
    Mean squared error loss function values in each epoch.
"""
def __init__(self, eta=0.01, n_iter=50, random_state=1):
self.eta = eta
self.n_iter = n_iter
self.random_state = random_state
def fit(self, X, y):
""" Fit training data.
Parameters
----------
X : {array-like}, shape = [n_examples, n_features]
Training vectors, where n_examples is the number of examples and
n_features is the number of features.
y : array-like, shape = [n_examples]
Target values.
Returns
-------
self : object
"""
rgen = np.random.RandomState(self.random_state)
self.w_ = rgen.normal(loc=0.0, scale=0.01, size=X.shape[1])
self.b_ = np.float_(0.)
self.losses_ = []
for i in range(self.n_iter):
net_input = self.net_input(X)
# Please note that the "activation" method has no effect
# in the code since it is simply an identity function. We
# could write `output = self.net_input(X)` directly instead.
# The purpose of the activation is more conceptual, i.e.,
# in the case of logistic regression (as we will see later),
# we could change it to
# a sigmoid function to implement a logistic regression classifier.
output = self.activation(net_input)
errors = (y - output)
#for w_j in range(self.w_.shape[0]):
# self.w_[w_j] += self.eta * (2.0 * (X[:, w_j]*errors)).mean()
self.w_ += self.eta * 2.0 * X.T.dot(errors) / X.shape[0]
self.b_ += self.eta * 2.0 * errors.mean()
loss = (errors**2).mean()
self.losses_.append(loss)
return self
def net_input(self, X):
"""Calculate net input"""
return np.dot(X, self.w_) + self.b_
def activation(self, X):
"""Compute linear activation"""
return X
def predict(self, X):
"""Return class label after unit step"""
return np.where(self.activation(self.net_input(X)) >= 0.5, 1, 0)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))
ada1 = AdalineGD(n_iter=15, eta=0.1).fit(X, y)
ax[0].plot(range(1, len(ada1.losses_) + 1), np.log10(ada1.losses_), marker='o')
ax[0].set_xlabel('Epochs')
ax[0].set_ylabel('log(Mean squared error)')
ax[0].set_title('Adaline - Learning rate 0.1')
ada2 = AdalineGD(n_iter=15, eta=0.0001).fit(X, y)
ax[1].plot(range(1, len(ada2.losses_) + 1), ada2.losses_, marker='o')
ax[1].set_xlabel('Epochs')
ax[1].set_ylabel('Mean squared error')
ax[1].set_title('Adaline - Learning rate 0.0001')
# plt.savefig('images/02_11.png', dpi=300)
plt.show()
# ## Improving gradient descent through feature scaling
# standardize features
X_std = np.copy(X)
X_std[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()
X_std[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()
ada_gd = AdalineGD(n_iter=20, eta=0.5)
ada_gd.fit(X_std, y)
plot_decision_regions(X_std, y, classifier=ada_gd)
plt.title('Adaline - Gradient descent')
plt.xlabel('Sepal length [standardized]')
plt.ylabel('Petal length [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
#plt.savefig('images/02_14_1.png', dpi=300)
plt.show()
plt.plot(range(1, len(ada_gd.losses_) + 1), ada_gd.losses_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Mean squared error')
plt.tight_layout()
#plt.savefig('images/02_14_2.png', dpi=300)
plt.show()
# ## Large scale machine learning and stochastic gradient descent
class AdalineSGD():
"""ADAptive LInear NEuron classifier.
Parameters
------------
eta : float
Learning rate (between 0.0 and 1.0)
n_iter : int
Passes over the training dataset.
shuffle : bool (default: True)
Shuffles training data every epoch if True to prevent cycles.
random_state : int
Random number generator seed for random weight
initialization.
Attributes
-----------
w_ : 1d-array
Weights after fitting.
b_ : Scalar
Bias unit after fitting.
losses_ : list
Mean squared error loss function value averaged over all
training examples in each epoch.
"""
def __init__(self, eta=0.01, n_iter=10, shuffle=True, random_state=None):
self.eta = eta
self.n_iter = n_iter
self.w_initialized = False
self.shuffle = shuffle
self.random_state = random_state
def fit(self, X, y):
""" Fit training data.
Parameters
----------
X : {array-like}, shape = [n_examples, n_features]
Training vectors, where n_examples is the number of examples and
n_features is the number of features.
y : array-like, shape = [n_examples]
Target values.
Returns
-------
self : object
"""
self._initialize_weights(X.shape[1])
self.losses_ = []
for i in range(self.n_iter):
if self.shuffle:
X, y = self._shuffle(X, y)
losses = []
for xi, target in zip(X, y):
losses.append(self._update_weights(xi, target))
avg_loss = np.mean(losses)
self.losses_.append(avg_loss)
return self
def partial_fit(self, X, y):
"""Fit training data without reinitializing the weights"""
if not self.w_initialized:
self._initialize_weights(X.shape[1])
if y.ravel().shape[0] > 1:
for xi, target in zip(X, y):
self._update_weights(xi, target)
else:
self._update_weights(X, y)
return self
def _shuffle(self, X, y):
"""Shuffle | |
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import maya.cmds
import IECore
import IECoreScene
import IECoreMaya
__dagMenuCallbacks = []
## Registers a callback to be used when creating the right click dag
# menu for scene shapes. Callbacks should have the following signature :
#
# callback( menuDefinition, sceneShape ).
def addDagMenuCallback( callback ) :
if not callback in __dagMenuCallbacks :
__dagMenuCallbacks.append( callback )
## Removes a callback previously added with addDagMenuCallback.
def removeDagMenuCallback( callback ) :
__dagMenuCallbacks.remove( callback )
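## Example callback (illustrative only, not registered by this module) :
#
#	def __warnShapeName( menuDefinition, sceneShape ) :
#		menuDefinition.append(
#			"/Print Shape Name",
#			{ "command" : functools.partial( maya.cmds.warning, "SceneShape: " + sceneShape ) }
#		)
#
#	addDagMenuCallback( __warnShapeName )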
def _menuDefinition( callbackShape ) :
sceneShapes = __selectedSceneShapes()
if not sceneShapes :
return
mainDef = IECore.MenuDefinition()
fnShapes = [ IECoreMaya.FnSceneShape( shape ) for shape in sceneShapes ]
# INVALID SHAPES
invalidSceneShapes = __invalidSceneShapes( sceneShapes )
if invalidSceneShapes :
mainDef.append( "/Invalid Inputs for selected SceneShapes!", { "blindData" : { "maya" : { "radialPosition" : "N" } } } )
return mainDef
# COMPONENT MODE
if fnShapes[ 0 ].selectedComponentNames() :
mainDef.append( "/Object", { "blindData" : { "maya" : { "radialPosition" : "N" } }, "command" : functools.partial( __objectCallback, sceneShapes[ 0 ] ) } )
mainDef.append( "/Print Component Names", { "blindData" : { "maya" : { "radialPosition" : "NW" } }, "command" : functools.partial( __printComponents, sceneShapes[ 0 ] ) } )
mainDef.append( "/Print Selected Component Names", { "blindData" : { "maya" : { "radialPosition" : "NE" } }, "command" : functools.partial( __printSelectedComponents, sceneShapes[ 0 ] ) } )
# EXPAND
expandDef = IECore.MenuDefinition( [
("/Expand to Selected Components", { "blindData" : { "maya" : { "radialPosition" : "S" } }, "command" : functools.partial( __expandToSelected, sceneShapes[ 0 ] ) }),
] )
mainDef.append( "/Expand...", { "blindData" : { "maya" : { "radialPosition" : "SE" } }, "subMenu" : expandDef } )
locatorDef = IECore.MenuDefinition( [
("/At Bound Min", { "blindData" : { "maya" : { "radialPosition" : "N" } }, "command" : functools.partial( __createLocatorAtPoints, sceneShapes[ 0 ], [ "Min" ] ) }),
("/At Bound Max", { "blindData" : { "maya" : { "radialPosition" : "NE" } }, "command" : functools.partial( __createLocatorAtPoints, sceneShapes[ 0 ], [ "Max" ] ) }),
("/At Bound Min And Max", { "blindData" : { "maya" : { "radialPosition" : "E" } }, "command" : functools.partial( __createLocatorAtPoints, sceneShapes[ 0 ], [ "Min", "Max" ] ) }),
("/At Bound Centre", { "blindData" : { "maya" : { "radialPosition" : "SE" } }, "command" : functools.partial( __createLocatorAtPoints, sceneShapes[ 0 ], [ "Center" ] ) }),
("/At Transform Origin", { "blindData" : { "maya" : { "radialPosition" : "S" } }, "command" : functools.partial( __createLocatorWithTransform, sceneShapes[ 0 ] ) }),
] )
mainDef.append( "/Create Locator", { "blindData" : { "maya" : { "radialPosition" : "SW" } }, "subMenu" : locatorDef } )
# OBJECT MODE
else :
# PREVIEW
if len( sceneShapes ) == 1 and (maya.cmds.getAttr( sceneShapes[ 0 ] + ".drawGeometry" ) or maya.cmds.getAttr( sceneShapes[ 0 ] + ".drawChildBounds" )) :
mainDef.append( "/Component", { "blindData" : { "maya" : { "radialPosition" : "N" } }, "command" : functools.partial( __componentCallback, sceneShapes[ 0 ] ) } )
previewDef = IECore.MenuDefinition( [
("/All Geometry On", { "blindData" : { "maya" : { "radialPosition" : "E" } }, "command" : functools.partial( __setChildrenPreviewAttributes, sceneShapes, "drawGeometry", True ) }),
("/All Child Bounds On", { "blindData" : { "maya" : { "radialPosition" : "SE" } }, "command" : functools.partial( __setChildrenPreviewAttributes, sceneShapes, "drawChildBounds", True ) }),
("/All Root Bound On", { "blindData" : { "maya" : { "radialPosition" : "NE" } }, "command" : functools.partial( __setChildrenPreviewAttributes, sceneShapes, "drawRootBound", True ) }),
("/All Geometry Off", { "blindData" : { "maya" : { "radialPosition" : "W" } }, "command" : functools.partial( __setChildrenPreviewAttributes, sceneShapes, "drawGeometry", False ) }),
("/All Child Bounds Off", { "blindData" : { "maya" : { "radialPosition" : "SW" } }, "command" : functools.partial( __setChildrenPreviewAttributes, sceneShapes, "drawChildBounds", False ) }),
("/All Root Bound Off", { "blindData" : { "maya" : { "radialPosition" : "NE" } }, "command" : functools.partial( __setChildrenPreviewAttributes, sceneShapes, "drawRootBound", False ) })
] )
mainDef.append( "/Preview...", { "blindData" : { "maya" : { "radialPosition" : "NW" } }, "subMenu" : previewDef } )
# get all tags that are shared between all shapes
commonTags = None
for fn in fnShapes :
scene = fn.sceneInterface()
tmpTags = scene.readTags( IECoreScene.SceneInterface.EveryTag )
if commonTags is None :
commonTags = set( tmpTags )
else :
commonTags.intersection_update( set( tmpTags ) )
tagTree = dict()
if commonTags :
for tag in commonTags :
tag = str( tag )
namespace, _, subTagsString = tag.partition( ':' )
subTags = set( subTagsString.split( ':' ) )
if not namespace in tagTree :
tagTree[ namespace ] = subTags
else :
tagTree[ namespace ].update( subTags )
# EXPAND
expandDef = IECore.MenuDefinition(
[ ("/Recursive Expand As Geometry", { "blindData" : { "maya" : { "radialPosition" : "W" } }, "command" : functools.partial( _expandAsGeometry, sceneShapes)})] )
mainDef.append( "/Expand...", { "blindData" : { "maya" : { "radialPosition" : "SE" } }, "subMenu" : expandDef } )
if any( map( lambda x : x.canBeExpanded(), fnShapes ) ) :
expandDef.append( "/Expand One Level", { "blindData" : { "maya" : { "radialPosition" : "S" } }, "command" : functools.partial( __expandOnce, sceneShapes ) } )
expandDef.append( "/Recursive Expand", { "blindData" : { "maya" : { "radialPosition" : "E" } }, "command" : functools.partial( _expandAll, sceneShapes)})
if len( sceneShapes ) == 1 and fnShapes[ 0 ].selectedComponentNames() :
expandDef.append( "/Expand to Selected Components", { "blindData" : { "maya" : { "radialPosition" : "S" } }, "command" : functools.partial( __expandToSelected, sceneShapes[ 0 ] ) } )
if tagTree :
tags = sorted( tagTree.keys() )
def addTagSubMenuItems( menuDef, command ) :
import copy
copiedTagTree = copy.deepcopy( tagTree )
for tag in tags :
subtags = list( copiedTagTree[ tag ] )
subtags.sort()
for subtag in subtags :
if subtag == '' :
label = "/{}".format( tag )
expandTag = tag
else :
label = "/{}/{}".format( tag, subtag )
expandTag = "{}:{}".format( tag, subtag )
menuDef.append( label, { "command" : functools.partial( command, sceneShapes, expandTag ) } )
filterDef = IECore.MenuDefinition( [
("/Display All", { "command" : functools.partial( _setTagsFilterPreviewAttributes, sceneShapes, "")})
] )
expandTagDef = IECore.MenuDefinition()
expandTagGeoDef = IECore.MenuDefinition()
mainDef.append( "/Tags filter...", { "blindData" : { "maya" : { "radialPosition" : "S" } }, "subMenu" : filterDef } )
addTagSubMenuItems( filterDef, _setTagsFilterPreviewAttributes)
addTagSubMenuItems( expandTagDef, _expandAll)
addTagSubMenuItems( expandTagGeoDef, _expandAsGeometry)
expandDef.append( "/Expand by Tag...", { "blindData" : { "maya" : { "radialPosition" : "SE" } }, "subMenu" : expandTagDef } )
expandDef.append( "/Expand by Tag as Geo...", { "blindData" : { "maya" : { "radialPosition" : "SW" } }, "subMenu" : expandTagGeoDef } )
parentSceneShape = __parentSceneShape( sceneShapes )
# COLLAPSE
if any( map( lambda x : x.canBeCollapsed(), fnShapes ) ) or (parentSceneShape and IECoreMaya.FnSceneShape( parentSceneShape ).canBeCollapsed()) :
collapseDef = IECore.MenuDefinition()
if parentSceneShape and IECoreMaya.FnSceneShape( parentSceneShape ).canBeCollapsed() :
parentName = maya.cmds.listRelatives( parentSceneShape, p = True )[ 0 ]
collapseDef.append( "/Collapse to Parent: {}".format( parentName ),
{ "blindData" : { "maya" : { "radialPosition" : "N" } }, "command" : functools.partial( __collapseChildren, [ parentSceneShape ] ) } )
if any( map( lambda x : x.canBeCollapsed(), fnShapes ) ) :
collapseDef.append( "/Collapse Children", { "blindData" : { "maya" : { "radialPosition" : "W" } }, "command" : functools.partial( __collapseChildren, sceneShapes ) } )
mainDef.append( "/Collapse...", { "blindData" : { "maya" | |
"""
Django model definitions (database schema).
## django-peeringdb
peeringdb_server uses the abstract models from django-peeringdb.
Often, it makes the most sense for a field to be added to the abstraction
in django-peeringdb, so it can be available for people using local snapshots of the databases.
Generally speaking, if the field is to be added to the REST API output,
it should be added through django-peeringdb.
Fields to facilitate internal operations of peeringdb on the other hand, DO NOT need to be added to django-peeringdb.
## migrations
For concrete models, django-peeringdb and peeringdb_server maintain separate model migrations.
When adding new fields to django-peeringdb make sure migration files for the schema changes exist in both places.
Please open a merge request in peeringdb/django-peeringdb for the field addition as well.
"""
import datetime
import ipaddress
import json
import re
import uuid
from itertools import chain
import django.urls
import django_peeringdb.models as pdb_models
import reversion
from allauth.account.models import EmailAddress, EmailConfirmation
from allauth.socialaccount.models import SocialAccount
from django.conf import settings
from django.contrib.auth.models import (
AbstractBaseUser,
Group,
PermissionsMixin,
UserManager,
)
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core import validators
from django.core.exceptions import ValidationError
from django.core.mail.message import EmailMultiAlternatives
from django.db import models, transaction
from django.template import loader
from django.utils import timezone
from django.utils.functional import Promise
from django.utils.http import urlquote
from django.utils.translation import override
from django.utils.translation import ugettext_lazy as _
from django_grainy.decorators import grainy_model
from django_grainy.models import Permission, PermissionManager
from django_grainy.util import check_permissions
from django_handleref.models import CreatedDateTimeField, UpdatedDateTimeField
from django_inet.models import ASNField
from passlib.hash import sha256_crypt
from rest_framework_api_key.models import AbstractAPIKey
import peeringdb_server.geo as geo
from peeringdb_server.inet import RdapLookup, RdapNotFoundError
from peeringdb_server.request import bypass_validation
from peeringdb_server.validators import (
validate_address_space,
validate_info_prefixes4,
validate_info_prefixes6,
validate_irr_as_set,
validate_phonenumber,
validate_poc_visible,
validate_prefix_overlap,
)
SPONSORSHIP_LEVELS = (
(1, _("Silver")),
(2, _("Gold")),
(3, _("Platinum")),
(4, _("Diamond")),
)
SPONSORSHIP_CSS = (
(1, "silver"),
(2, "gold"),
(3, "platinum"),
(4, "diamond"),
)
PARTNERSHIP_LEVELS = ((1, _("Data Validation")), (2, _("RIR")))
COMMANDLINE_TOOLS = (
("pdb_renumber_lans", _("Renumber IP Space")),
("pdb_fac_merge", _("Merge Facilities")),
("pdb_fac_merge_undo", _("Merge Facilities: UNDO")),
("pdb_undelete", _("Restore Object(s)")),
)
if settings.TUTORIAL_MODE:
COMMANDLINE_TOOLS += (("pdb_wipe", _("Reset Environment")),)
COMMANDLINE_TOOLS += (("pdb_ixf_ixp_member_import", _("IX-F Import")),)
def debug_mail(*args):
for arg in list(args):
print(arg)
print("-----------------------------------")
def make_relation_filter(field, filt, value, prefix=None):
if prefix:
field = re.sub("^%s__" % prefix, "", field)
field = re.sub("^%s_" % prefix, "", field)
if field == prefix:
field = "id"
if filt:
filt = {f"{field}__{filt}": value}
else:
filt = {field: value}
filt.update(status="ok")
return filt
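# Example (added; values are illustrative). With a relation prefix, the bare
# relation name collapses to an `id` lookup and `status="ok"` is always added:
#
#   >>> make_relation_filter("net", "in", [1, 2], prefix="net")
#   {'id__in': [1, 2], 'status': 'ok'}
#   >>> make_relation_filter("net__asn", "lt", 64512, prefix="net")
#   {'asn__lt': 64512, 'status': 'ok'}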
def validate_PUT_ownership(permission_holder, instance, data, fields):
"""
Helper function that checks if a user or API key has write perms to
the instance provided as well as write perms to any
child instances specified by fields as they exist on
the model and in data.
Example:
validate_PUT_ownership(
request.user,
network_contact,
{
"network": 123,
...
},
["network"]
)
will check that the user has write perms to
1. <NetworkContact> network_contact
2. <Network> network_contact.network
3. <Network> network(id=123)
If any of these checks fail, False is returned.
"""
if not check_permissions(permission_holder, instance, "u"):
return False
for fld in fields:
if fld == "net":
field_name = "network"
elif fld == "fac":
field_name = "facility"
else:
field_name = fld
a = getattr(instance, field_name)
try:
s_id = int(data.get(fld, data.get("%s_id" % fld)))
except ValueError:
continue
if a.id != s_id:
try:
other = a.__class__.objects.get(id=s_id)
if not check_permissions(permission_holder, other, "u"):
return False
except ValueError:  # if the id cannot be converted to int
return False
return True
def is_suggested(entity):
"""
Check if the network, facility or exchange is a suggested
entity (i.e., is it a member of the organization designated to
hold suggested entities).
"""
# if no org is specified, entity suggestion is turned
# off
if not getattr(settings, "SUGGEST_ENTITY_ORG", 0):
return False
org_id = getattr(entity, "org_id", 0)
return org_id == settings.SUGGEST_ENTITY_ORG
class UTC(datetime.tzinfo):
"""
UTC+0 tz for tz aware datetime fields.
"""
def utcoffset(self, d):
return datetime.timedelta(seconds=0)
class URLField(pdb_models.URLField):
"""
Local defaults for URLField.
"""
class ValidationErrorEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, ValidationError):
if hasattr(obj, "error_dict"):
return obj.error_dict
return obj.message
elif isinstance(obj, Promise):
return f"{obj}"
return super().default(obj)
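# Hedged usage sketch: pass this encoder to json.dumps so Django validation
# errors serialize to their message (or error_dict) instead of raising TypeError, e.g.
#   json.dumps({"prefix": ValidationError("invalid prefix")}, cls=ValidationErrorEncoder)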
class ProtectedAction(ValueError):
def __init__(self, obj):
super().__init__(obj.not_deletable_reason)
self.protected_object = obj
class ProtectedMixin:
"""
Mixin that implements checks for changing
/ deleting a model instance that will block
such actions under certain circumstances.
"""
@property
def deletable(self):
"""
Should return whether the object is currently
in a state where it can safely be soft-deleted.
If not deletable, should specify reason in
`_not_deletable_reason` property.
If deletable, should set `_not_deletable_reason`
property to None.
"""
return True
@property
def not_deletable_reason(self):
return getattr(self, "_not_deletable_reason", None)
def delete(self, hard=False, force=False):
if self.status in ["ok", "pending"]:
if not self.deletable and not force:
raise ProtectedAction(self)
self.delete_cleanup()
return super().delete(hard=hard)
def delete_cleanup(self):
"""
Runs cleanup before delete.
Override this in the class that uses this mixin (if needed).
"""
return
def save_without_timestamp(self):
self._meta.get_field("updated").auto_now = False
try:
self.save()
finally:
self._meta.get_field("updated").auto_now = True
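    # Usage sketch (assumption based on the code above): obj.save_without_timestamp()
    # persists changes without bumping the auto_now `updated` field, e.g. for
    # background maintenance that should not alter an object's modification date.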
class GeocodeBaseMixin(models.Model):
"""
Mixin to use for geocode enabled entities.
Allows an entity to be geocoded with the pdb_geo_sync command.
"""
geocode_status = models.BooleanField(
default=False,
help_text=_(
"Has this object's address been normalized with a call to the Google Maps API"
),
)
geocode_date = models.DateTimeField(
blank=True, null=True, help_text=_("Last time of attempted geocode")
)
class Meta:
abstract = True
@property
def geocode_coordinates(self):
"""
Return a tuple holding the latitude and longitude.
"""
if self.latitude is not None and self.longitude is not None:
return (self.latitude, self.longitude)
return None
@property
def geocode_address(self):
"""
Returns an address string suitable for geo API query.
"""
# pylint: disable=missing-format-attribute
return "{e.address1} {e.address2}, {e.city}, {e.state} {e.zipcode}".format(
e=self
)
def process_geo_location(self, geocode=True, save=True):
"""
Sets longitude and latitude.
Will return a dict containing normalized address
data.
"""
melissa = geo.Melissa(settings.MELISSA_KEY, timeout=5)
gmaps = geo.GoogleMaps(settings.GOOGLE_GEOLOC_API_KEY, timeout=5)
# geocode using google
use_melissa_coords = False
try:
if geocode:
gmaps.geocode(self)
except geo.Timeout:
raise ValidationError(_("Geo coding timed out"))
except geo.RequestError as exc:
raise ValidationError(_("Geo coding failed: {}").format(exc))
except geo.NotFound:
use_melissa_coords = True
# address normalization using melissa
#
# note: `sanitized` will be an empty dict if melissa
# could not normalize a valid address
try:
sanitized = melissa.sanitize_address_model(self)
except geo.Timeout:
raise ValidationError(_("Geo location lookup timed out"))
except geo.RequestError as exc:
raise ValidationError(_("Geo location lookup failed: {}").format(exc))
# update latitude and longitude
if use_melissa_coords and sanitized:
self.latitude = sanitized["latitude"]
self.longitude = sanitized["longitude"]
if geocode and (not use_melissa_coords or sanitized):
self.geocode_status = True
self.geocode_date = datetime.datetime.now(datetime.timezone.utc)
if sanitized:
sanitized["geocode_status"] = True
sanitized["geocode_date"] = self.geocode_date
if save:
self.save()
return sanitized
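    # Hedged usage sketch for a geocode-enabled instance `fac`:
    #   sanitized = fac.process_geo_location()
    # geocodes via Google Maps, normalizes the address via Melissa, updates
    # latitude/longitude and geocode_status, saves the instance, and returns the
    # normalized address dict (empty if Melissa could not normalize the address).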
class GeoCoordinateCache(models.Model):
"""
Stores geocoordinates for address lookups.
"""
country = pdb_models.CountryField()
city = models.CharField(max_length=255, null=True, blank=True)
address1 = models.CharField(max_length=255, null=True, blank=True)
state = models.CharField(max_length=255, null=True, blank=True)
zipcode = models.CharField(max_length=255, null=True, blank=True)
latitude = models.DecimalField(
_("Latitude"), max_digits=9, decimal_places=6, null=True, blank=True
)
longitude = models.DecimalField(
_("Longitude"), max_digits=9, decimal_places=6, null=True, blank=True
)
fetched = models.DateTimeField(auto_now_add=True)
class Meta:
db_table = "peeringdb_geocoord_cache"
verbose_name = _("Geocoordinate Cache")
verbose_name_plural = _("Geocoordinate Cache Entries")
@classmethod
def request_coordinates(cls, **kwargs):
address_fields = [
"address1",
"zipcode",
"state",
"city",
"country",
]
# we only request geo-coordinates if country and
# city/state are specified
if not kwargs.get("country"):
return None
if not kwargs.get("city") and not kwargs.get("state"):
return None
# address string passed to google for lookup
address = []
# filters passed to GeoCoordinateCache for cache retrieval
filters = {}
# attributes passed to GeoCoordinateCache for cache creation
params = {}
# prepare geo-coordinate filters, params and lookup
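        # note: "country" is the last entry in address_fields, so after this loop
        # the local variable `country` holds the country value used for the lookup below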
for field in address_fields:
value = kwargs.get(field, None)
if value and isinstance(value, list):
value = value[0]
if field != "country" and value:
address.append(f"{value}")
else:
country = value
params[field] = value
if value:
filters[field] = value
else:
filters[f"{field}__isnull"] = True
# attempt to retrieve a valid cache
cache = cls.objects.filter(**filters).order_by("-fetched").first()
if cache:
tdiff = timezone.now() - cache.fetched
# check if cache is past expiry date, and expire it if so
if tdiff.total_seconds() > settings.GEOCOORD_CACHE_EXPIRY:
cache.delete()
cache = None
if not cache:
# valid geo-coord cache does not exist, request coordinates
# from google and create a cache entry
address = " ".join(address)
google = geo.GoogleMaps(settings.GOOGLE_GEOLOC_API_KEY)
try:
if params.get("address1"):
typ = "premise"
elif params.get("zipcode"):
typ = "postal"
elif params.get("city"):
typ = "city"
elif params.get("state"):
typ = "state"
else:
typ = "country"
coords = google.geocode_address(address, country, typ=typ)
cache = cls.objects.create(
latitude=coords["lat"], longitude=coords["lng"], **params
)
except geo.NotFound:
# google could not find address
# we still create a cache entry with null coordinates.
cls.objects.create(**params)
raise
return {"longitude": cache.longitude, "latitude": cache.latitude}
class UserOrgAffiliationRequest(models.Model):
"""
Whenever a user requests to be affiliated to an Organization
| |
1424.647927352167, 2676217.667872637, 5151840.492070999,
6253461.142118783, 11.605501217123276, 0.016034774278074274, 1.670666167191741e-06],
[1172.1322901571223, 1261.353970231427, 1424.647927352167, 2643981.980613781, 5539495.081817166,
6122658.492254281, 11.426790838912128, 0.014402352374322306, 1.746960716329272e-06],
[1046.2476557504224, 1253.7081957386629, 1424.647927352167, 2576404.8537268285, 5037123.924454901,
6264496.097901617, 10.926743668226985, 0.015654224593012078, 1.7221039311460954e-06],
[1179.6140307813284, 1284.2421194708988, 1446.7483442856462, 2854778.224454453, 5290343.222690649,
5907538.352840094, 10.175809217601456, 0.017136258349463553, 1.8805380048230116e-06],
[1045.94074477175, 1257.5582265374605, 1428.1727195332353, 2674918.453684049, 5029083.46383821,
6252781.131346029, 9.66368383513432, 0.01508497956653298, 1.6858501133596892e-06],
[1172.1322901571223, 1273.6063854006757, 1424.647927352167, 2787890.321944607, 4970750.218790298,
6264496.097901617, 11.69640114007929, 0.015654224593012078, 1.7492546775003008e-06],
[1114.9126539360466, 1255.3060555816992, 1420.7016199249624, 2711709.7515290566, 5135604.332508602,
6339744.606807769, 9.869108693672011, 0.01508497956653298, 1.64751e-06],
[1045.94074477175, 1257.5582265374605, 1428.1727195332353, 2676965.0315407575, 5029083.46383821,
6252781.131346029, 9.912726746719038, 0.01508497956653298, 1.6858501133596892e-06],
[1116.099438708417, 1248.2528109041914, 1431.2802890524115, 2618670.60777649, 5020955.320062604,
6252740.844923852, 10.403131094036514, 0.01439649066491804, 1.6612822159851205e-06],
[1172.1322901571223, 1261.2561027220345, 1424.647927352167, 2674527.9391689897, 5634195.507417285,
6253461.142118783, 11.605501217123276, 0.015129553230613428, 1.646211033468178e-06],
[1114.834792035313, 1260.5869822935006, 1424.6533094655529, 2711709.7515290566, 5177667.749296468,
6058545.49793099, 9.869052328516, 0.01508497956653298, 1.64751e-06],
[1100.1063624541289, 1231.7664627414742, 1436.3027121805815, 3627678.357077309, 4986879.9039197,
2532740.0207836498, 10.181273986642962, 0.016620130673890635, 2.3929656390077357e-06],
[1100.3826382823288, 1260.5869822935006, 1420.7016199249624, 2794529.276377869, 5151932.1545183025,
6346803.504583463, 9.87071144774333, 0.01508497956653298, 1.6414678246117103e-06],
[1180.7233642687456, 1290.1144240449926, 1434.8120791304352, 2854778.224454453, 5290343.222690649,
6018525.355599652, 10.111309139135487, 0.017509170428913215, 1.874732806720558e-06],
[1050.9762404493679, 1258.4266239556036, 1440.177953468843, 2711337.2097504647, 4951513.490183301,
6252781.131346029, 9.840366214118836, 0.01508497956653298, 1.6858501133596892e-06],
[1112.0739142572368, 1252.4221946420748, 1421.4288718354624, 2711709.7515290566, 5177667.749296468,
6205159.784569372, 9.787151112326503, 0.014995216559648368, 1.6865448209421645e-06],
[1172.1322901571223, 1263.903713429593, 1424.647927352167, 2627915.438331411, 5539495.081817166,
6264496.097901617, 11.21124190291663, 0.015654224593012078, 1.7492546775003008e-06],
[1115.3505637801043, 1250.1440195093382, 1417.21404073115, 2809265.192909424, 5151932.1545183025,
6252740.844923852, 9.899845029941671, 0.01491459237885602, 1.6761119331505416e-06],
[1100.1408530781362, 1259.20529625876, 1420.7016199249624, 2794529.276377869, 5137466.622582068,
6291895.722338178, 9.959965929147502, 0.014877788107490625, 1.6827081430883913e-06],
[1114.9126539360466, 1256.6998042531457, 1432.7380531614376, 2711709.7515290566, 5135842.055580591,
6339744.606807769, 10.010294528649096, 0.014604814079047299, 1.64751e-06],
[1114.8364994143328, 1276.755042176067, 1430.0554994299264, 2711709.7515290566, 5061252.899416794,
6843779.100678993, 9.962874958185285, 0.014995216559648368, 1.6435718248748259e-06],
[1172.1322901571223, 1273.6063854006757, 1424.647927352167, 2787890.321944607, 4970750.218790298,
6264496.097901617, 11.69640114007929, 0.015654224593012078, 1.7492546775003008e-06],
[1114.834792035313, 1252.4221946420748, 1431.2324989413153, 2674228.063186179, 5073692.871060503,
6205159.784569372, 9.962874958185285, 0.014995216559648368, 1.64441131717817e-06],
[1050.9762404493679, 1253.7997759357147, 1446.6888589765636, 2642226.6327868286, 5029083.46383821,
6189310.214897488, 9.939222256778564, 0.015713623088508245, 1.6858501133596892e-06],
[1114.8364015979141, 1262.2228977124012, 1424.509665833488, 2711709.7515290566, 5177667.749296468,
6205159.784569372, 9.962874958185285, 0.014995216559648368, 1.644233311414821e-06],
[1045.94074477175, 1257.5582265374605, 1428.1727195332353, 2676965.0315407575, 5029083.46383821,
6252781.131346029, 9.912726746719038, 0.01508497956653298, 1.6858501133596892e-06],
[1112.0739142572368, 1252.4221946420748, 1421.4288718354624, 2711709.7515290566, 5079729.931980113,
6205159.784569372, 9.523942410142089, 0.014995216559648368, 1.6455995539949147e-06],
[1125.2043182911782, 1262.2228977124012, 1424.509665833488, 2711709.7515290566, 5177667.749296468,
6205159.784569372, 9.962874958185285, 0.014995216559648368, 1.644233311414821e-06],
[1115.400748739093, 1265.5877782356972, 1428.3010571928912, 2714079.301423972, 5048177.206187649,
6403353.83122605, 11.579324789874297, 0.015321153721496901, 1.6447615608703846e-06],
[1172.1322901571223, 1272.8969286837373, 1437.557746508848, 2643981.980613781, 5644960.1495587025,
6319984.010862878, 11.608379366203652, 0.0153098728874533, 1.7558263639829205e-06],
[1172.1322901571223, 1261.353970231427, 1424.647927352167, 2643981.980613781, 5539495.081817166,
6122658.492254281, 11.426790838912128, 0.014402352374322306, 1.772746704805386e-06],
[1089.037314605264, 1260.5869822935006, 1420.7016199249624, 2794529.276377869, 5198282.044819644,
6403335.37885126, 9.87071144774333, 0.015004521873080429, 1.6462972507927568e-06],
[1171.886230946291, 1261.2561027220345, 1424.647927352167, 2674527.9391689897, 5541510.5883321315,
6254834.865253087, 11.602935611820246, 0.015129553230613428, 1.646211033468178e-06],
[1165.0546070424907, 1263.903713429593, 1424.647927352167, 2674819.702993904, 5540572.403677184,
6264496.097901617, 11.21124190291663, 0.015844898331152327, 1.7492546775003008e-06],
[1045.94074477175, 1257.5582265374605, 1428.1727195332353, 2674918.453684049, 5029083.46383821,
6252781.131346029, 9.66368383513432, 0.01508497956653298, 1.6858501133596892e-06],
[1115.3505637801043, 1259.3587163655109, 1417.21404073115, 2809265.192909424, 5151932.1545183025,
6252740.844923852, 9.899845029941671, 0.01491459237885602, 1.6416646007583103e-06],
[1114.9126539360466, 1263.4435351891264, 1420.7016199249624, 2711709.7515290566, 5234415.800976389,
6351339.408023649, 9.599662957877094, 0.01508497956653298, 1.64751e-06],
[1165.0546070424907, 1263.903713429593, 1424.647927352167, 2674819.702993904, 5540572.403677184,
6211148.3834170075, 11.21124190291663, 0.015844898331152327, 1.7492546775003008e-06],
[1172.1322901571223, 1249.0062677413969, 1424.647927352167, 2676217.667872637, 5151840.492070999,
6253461.142118783, 11.605501217123276, 0.016034774278074274, 1.670666167191741e-06],
[1115.3505637801043, 1259.3587163655109, 1417.21404073115, 2809265.192909424, 5151932.1545183025,
6252740.844923852, 9.899845029941671, 0.01491459237885602, 1.6416646007583103e-06],
[1094.20494378046, 1257.1652413000595, 1434.1536421639848, 2640666.855664094, 5123686.718780826,
6266674.486555223, 10.336890255390792, 0.01508497956653298, 1.64751e-06],
[1140.2618729167225, 1259.7696328062204, 1425.9492710629509, 2715399.6210057866, 5204539.152162562,
6129007.601007714, 9.459700490332747, 0.01508497956653298, 1.8717425252296465e-06],
[1106.8161654815144, 1260.5869822935006, 1424.6533094655529, 2756948.118884614, 4987417.102943326,
6252960.904134171, 11.401218941020598, 0.01508497956653298, 1.6571644711735225e-06],
[1050.9762404493679, 1253.7841068951632, 1446.6888589765636, 2641700.2350165956, 5029083.46383821,
6189310.214897488, 9.939222256778564, 0.015713623088508245, 1.6858501133596892e-06],
[1172.1322901571223, 1261.353970231427, 1424.647927352167, 2643981.980613781, 5539495.081817166,
6122658.492254281, 11.426790838912128, 0.014416923144892024, 1.746960716329272e-06],
[1106.8161654815144, 1260.5869822935006, 1428.2938999164598, 2756856.4798341207, 4987417.102943326,
6252960.904134171, 11.324658824884459, 0.01508497956653298, 1.6859236759203733e-06],
[1170.815684829177, 1263.903713429593, 1424.647927352167, 2627915.438331411, 5539495.081817166,
6264496.097901617, 11.21124190291663, 0.015919094724508868, 1.7222399231838787e-06],
[1124.8314582363391, 1244.7486449823257, 1428.1727195332353, 2711109.3787926026, 4957035.652219516,
6252781.131346029, 9.912726746719038, 0.015131656963552058, 1.6858501133596892e-06],
[1159.0274286361612, 1287.8053727475722, 1448.538005613146, 2854778.224454453, 5360842.436526001,
5907538.352840094, 10.257090415594687, 0.017136258349463553, 1.8805380048230116e-06],
[1050.9762404493679, 1253.7997759357147, 1446.6888589765636, 2675995.973765552, 5086221.215584276,
6307046.665064646, 9.939222256778564, 0.0148888930145161, 1.6858501133596892e-06],
[1114.8364015979141, 1262.2228977124012, 1424.509665833488, 2708245.6581426947, 5177667.749296468,
6205159.784569372, 9.962874958185285, 0.014995216559648368, 1.644233311414821e-06],
[1050.9762404493679, 1253.7997759357147, 1446.6888589765636, 2642226.6327868286, 5029083.46383821,
6252781.131346029, 9.939222256778564, 0.015713623088508245, 1.6858501133596892e-06],
[1094.20494378046, 1257.1652413000595, 1434.1536421639848, 2640666.855664094, 5123686.718780826,
6266674.486555223, 10.336890255390792, 0.01508497956653298, 1.6437081192784193e-06],
[1106.8161654815144, 1260.5869822935006, 1424.6533094655529, 2756948.118884614, 4987417.102943326,
6252960.904134171, 11.31993774216143, 0.01508497956653298, 1.675718750508336e-06],
[1165.0546070424907, 1263.903713429593, 1424.647927352167, 2674819.702993904, 5540572.403677184,
6264496.097901617, 11.21124190291663, 0.015844898331152327, 1.7492546775003008e-06],
[1124.6022184104063, 1253.9976612371274, 1428.1727195332353, 2711109.3787926026, 5177658.5684807105,
6056075.082738621, 9.912726746719038, 0.014876312019105655, 1.6447260544123108e-06],
[1102.9707247869544, 1260.5869822935006, 1420.5917316831233, 2825130.0490485826, 5151932.1545183025,
6329254.498107326, 9.870483944224635, 0.01508497956653298, 1.6471410354130274e-06],
[1100.1408530781362, 1259.20529625876, 1423.8349912868905, 2794529.276377869, 5137466.622582068,
6100698.446400893, 9.963100330742796, 0.014877945336207184, 1.6416877649487889e-06],
[1114.9126539360466, 1260.5869822935006, 1420.7016199249624, 2794529.276377869, 5151932.1545183025,
6346803.504583463, 9.87071144774333, 0.01508497956653298, 1.64751e-06],
[1172.1322901571223, 1261.2561027220345, 1424.647927352167, 2674527.9391689897, 5634195.507417285,
6253461.142118783, 11.605501217123276, 0.015129553230613428, 1.646211033468178e-06],
[1050.9762404493679, 1253.7997759357147, 1446.6888589765636, 2675995.973765552, 5086221.215584276,
6307046.665064646, 9.939222256778564, 0.0148888930145161, 1.6858501133596892e-06],
[1172.1322901571223, 1261.353970231427, 1424.647927352167, 2643981.980613781, 5539495.081817166,
6122658.492254281, 11.426790838912128, 0.014402352374322306, 1.7961069170362274e-06],
[1124.0077507460633, 1259.3587163655109, 1417.21404073115, 2809265.192909424, 5341886.884732996,
6274517.334926196, 9.899845029941671, 0.01491459237885602, 1.6416646007583103e-06],
[1114.9126539360466, 1256.6998042531457, 1432.7380531614376, 2711709.7515290566, 5135842.055580591,
6339744.606807769, 10.010294528649096, 0.014604814079047299, 1.64751e-06],
[1089.037314605264, 1260.5869822935006, 1420.7016199249624, 2794529.276377869, 5198282.044819644,
6403335.37885126, 9.87071144774333, 0.015004521873080429, 1.6462972507927568e-06],
[1115.3505637801043, 1250.1440195093382, 1417.21404073115, 2809265.192909424, 5151932.1545183025,
6252740.844923852, 9.899845029941671, 0.01491459237885602, 1.6761119331505416e-06],
[1172.1322901571223, 1261.353970231427, 1424.647927352167, 2643981.980613781, 5539495.081817166,
6122658.492254281, 11.426790838912128, 0.014402352374322306, 1.772746704805386e-06],
[1117.1225010723806, 1245.0461992541775, 1408.2360625510769, 2809265.192909424, 5151932.1545183025,
6034517.235726284, 9.899845029941671, 0.014343389674800526, 1.7139568106579685e-06],
[1172.1322901571223, 1261.353970231427, 1424.647927352167, 2643981.980613781, 5539495.081817166,
6122658.492254281, 11.426790838912128, 0.014402711636037064, 1.8287965466133632e-06],
[1106.8161654815144, 1260.5869822935006, 1424.6533094655529, 2769369.5842076726, 4987417.102943326,
6252960.904134171, 11.31993774216143, 0.01508497956653298, 1.675718750508336e-06],
[1106.8161654815144, 1260.5869822935006, 1424.6533094655529, 2756948.118884614, 4982497.032409398,
6316982.864584648, 11.401218941020598, 0.015071570205789586, 1.6571644711735225e-06],
[1172.1322901571223, 1272.8969286837373, 1437.557746508848, 2643981.980613781, 5644960.1495587025,
6319984.010862878, 11.608379366203652, 0.0153098728874533, 1.7558263639829205e-06],
[1115.3505637801043, 1250.1440195093382, 1417.21404073115, 2809265.192909424, 5151932.1545183025,
6202055.6442852765, 9.899845029941671, 0.01491459237885602, 1.6761119331505416e-06],
[1102.9707247869544, 1260.5869822935006, 1420.5917316831233, 2825130.0490485826, 5151932.1545183025,
6329254.498107326, 9.870483944224635, 0.01508497956653298, 1.6471410354130274e-06],
[1106.8161654815144, 1260.5869822935006, 1428.2938999164598, 2756856.4798341207, 4987417.102943326,
6252960.904134171, 11.324658824884459, 0.01508497956653298, 1.6859236759203733e-06],
[1171.886230946291, 1261.2561027220345, 1424.647927352167, 2674527.9391689897, 5541510.5883321315,
6254834.865253087, 11.602935611820246, 0.015129553230613428, 1.646211033468178e-06],
[1124.3787600097082, 1256.786782905013, 1432.7380531614376, 2704309.8939345954, 5135842.055580591,
6339744.606807769, 10.010198748307836, 0.014604814079047299, 1.64751e-06],
[1172.1322901571223, 1261.353970231427, 1424.647927352167, 2643981.980613781, 5539495.081817166,
6122658.492254281, 11.426790838912128, 0.014416923144892024, 1.746960716329272e-06],
[1100.1408530781362, 1259.20529625876, 1420.7016199249624, 2794529.276377869, 5137466.622582068,
6291895.722338178, 9.959965929147502, 0.014877788107490625, 1.6827081430883913e-06],
[1111.4397194962298, 1252.3352159902076, 1431.2324989413153, 2834053.9104807484, 5073692.871060503,
6205159.784569372, 9.962970738526543, 0.014995216559648368, 1.64441131717817e-06],
[1106.8161654815144, 1260.5869822935006, 1424.6533094655529, 2756948.118884614, 4987417.102943326,
6252960.904134171, 11.401218941020598, 0.01508497956653298, 1.6571644711735225e-06],
[1106.8161654815144, 1260.5869822935006, 1424.6533094655529, 2756948.118884614, 4987417.102943326,
6252960.904134171, 11.31993774216143, 0.01508497956653298, 1.675718750508336e-06],
[1094.9494294625033, 1260.5869822935006, 1420.5917316831233, 2825130.0490485826, 5151932.1545183025,
6328379.705405241, 9.703511954746483, 0.01508497956653298, 1.6477426444753308e-06],
[1114.8364994143328, 1276.755042176067, 1430.0554994299264, 2711709.7515290566, 5061252.899416794,
6843779.100678993, 9.962874958185285, 0.014995216559648368, 1.6435718248748259e-06],
[1112.0739142572368, 1252.4221946420748, 1421.4288718354624, 2711709.7515290566, 5177667.749296468,
6205159.784569372, 9.787151112326503, 0.014995216559648368, 1.6865448209421645e-06],
[1172.1322901571223, 1261.2561027220345, 1424.647927352167, 2645497.698072738, 5634195.507417285,
6253461.142118783, 11.538003087103846, 0.015129553230613428, 1.646211033468178e-06],
[1115.400748739093, 1265.5877782356972, 1428.3010571928912, 2714079.301423972, 5048177.206187649,
6403353.83122605, 11.579324789874297, 0.015321153721496901, 1.6447615608703846e-06],
[1165.0546070424907, 1263.903713429593, 1424.647927352167, 2674819.702993904, 5540572.403677184,
6264496.097901617, 11.21124190291663, 0.015844898331152327, 1.7492546775003008e-06],
[1165.0546070424907, 1263.903713429593, 1424.647927352167, 2674819.702993904, 5540572.403677184,
6264496.097901617, 11.21124190291663, 0.015844898331152327, 1.7492546775003008e-06],
[1165.821735989405, 1263.903713429593, 1424.647927352167, 2674819.702993904, 5540572.403677184,
6264496.097901617, 11.21124190291663, 0.015844898331152327, 1.7492546775003008e-06],
[1140.2618729167225, 1259.7696328062204, 1425.9492710629509, 2715399.6210057866, 5204539.152162562,
6129007.601007714, 9.459700490332747, 0.01508497956653298, 1.8717425252296465e-06],
[1102.2121241384732, 1260.5869822935006, 1425.726056523609, 2794529.276377869, 5151932.1545183025,
6346803.504583463, 9.87071144774333, 0.01508497956653298, 1.6414678246117103e-06],
[1172.1322901571223, 1275.3575942303487, 1424.647927352167, 2674527.9391689897, 5634195.507417285,
6253461.142118783, 11.605501217123276, 0.015129553230613428, 1.646211033468178e-06],
[1100.3826382823288, 1260.5869822935006, 1420.7016199249624, 2794529.276377869, 5151932.1545183025,
6346803.504583463, 9.87071144774333, 0.01508497956653298, 1.6414678246117103e-06],
[1172.1322901571223, 1273.6063854006757, 1424.647927352167, 2787890.321944607, 4970750.218790298,
6264496.097901617, 11.69640114007929, 0.015654224593012078, 1.7492546775003008e-06],
[1115.3505637801043, 1259.3587163655109, 1417.21404073115, 2809265.192909424, 5151932.1545183025,
6252740.844923852, 9.899845029941671, 0.01491459237885602, 1.6416646007583103e-06],
[1115.3505637801043, 1259.3587163655109, 1417.21404073115, 2809265.192909424, 5151932.1545183025,
6252740.844923852, 9.899845029941671, 0.01491459237885602, 1.6416646007583103e-06],
[1115.3505637801043, 1259.3587163655109, 1417.21404073115, 2809265.192909424, 5151932.1545183025,
6252740.844923852, 9.899845029941671, 0.01491459237885602, 1.6416646007583103e-06],
[1115.3505637801043, 1259.3587163655109, 1417.21404073115, 2809265.192909424, 5151932.1545183025,
6252740.844923852, 9.899845029941671, 0.01491459237885602, 1.6416646007583103e-06],
[1115.3505637801043, 1259.877397589078, 1417.21404073115, 2809265.192909424, 5151932.1545183025,
6252740.844923852, 9.899845029941671, 0.01491459237885602, 1.6416646007583103e-06],
[1165.0546070424907, 1263.903713429593, 1424.647927352167, 2674819.702993904, 5540572.403677184,
6211148.3834170075, 11.21124190291663, 0.015844898331152327, 1.7492546775003008e-06],
[1165.0546070424907, 1263.903713429593, 1424.647927352167, 2674819.702993904, 5540572.403677184,
6211148.3834170075, 11.21124190291663, 0.015844898331152327, 1.7492546775003008e-06],
[1114.9126539360466, 1260.5869822935006, 1420.7016199249624, 2794529.276377869, 5151932.1545183025,
6346803.504583463, 9.87071144774333, 0.01508497956653298, 1.64751e-06],
[1045.94074477175, 1257.5582265374605, 1428.1727195332353, 2676965.0315407575, 5029083.46383821,
6252781.131346029, 9.912726746719038, 0.01508497956653298, 1.6858501133596892e-06],
[1050.9762404493679, 1258.4266239556036, 1440.177953468843, 2711337.2097504647, 4951513.490183301,
6252781.131346029, 9.840366214118836, 0.01508497956653298, 1.6858501133596892e-06],
[1112.0739142572368, 1252.4221946420748, 1421.4288718354624, 2711709.7515290566, 5177667.749296468,
6205159.784569372, 9.462388216486575, 0.014995216559648368, 1.6865448209421645e-06],
[1114.9126539360466, 1263.4435351891264, 1420.7016199249624, 2711709.7515290566, 5234415.800976389,
6351339.408023649, 9.599662957877094, 0.01508497956653298, 1.64751e-06],
[1124.8314582363391, 1244.7486449823257, 1428.1727195332353, 2711109.3787926026, 4957035.652219516,
6252781.131346029, 9.912726746719038, 0.015131656963552058, 1.6858501133596892e-06],
[1095.4692686979017, 1253.9910313461303, 1434.1536421639848, 2640666.855664094, 5123686.718780826,
6266431.849747426, 10.336890255390792, 0.01508497956653298, 1.64751e-06],
[1172.1322901571223, 1261.2561027220345, 1424.647927352167, 2674527.9391689897, 5634195.507417285,
6253461.142118783, 11.605501217123276, 0.015129553230613428, 1.646211033468178e-06],
[1050.9762404493679, 1253.7997759357147, 1446.6888589765636, 2675995.973765552, 5086221.215584276,
6307046.665064646, 9.939222256778564, 0.0148888930145161, 1.6858501133596892e-06],
[1172.1322901571223, 1261.353970231427, 1424.647927352167, 2643981.980613781, 5539495.081817166,
6122658.492254281, 11.426790838912128, 0.014402352374322306, 1.7961069170362274e-06],
[1124.0077507460633, 1259.3587163655109, 1417.21404073115, 2809265.192909424, 5341886.884732996,
6274517.334926196, 9.899845029941671, 0.01491459237885602, 1.6416646007583103e-06],
[1114.9126539360466, 1256.6998042531457, 1432.7380531614376, 2711709.7515290566, 5135842.055580591,
6339744.606807769, 10.010294528649096, 0.014604814079047299, 1.64751e-06],
[1089.037314605264, 1260.5869822935006, 1420.7016199249624, 2794529.276377869, 5198282.044819644,
6403335.37885126, 9.87071144774333, 0.015004521873080429, 1.6462972507927568e-06],
[1172.1322901571223, 1261.353970231427, 1424.647927352167, 2643981.980613781, 5539495.081817166,
6382155.347856246, 10.793393892973041, 0.014402352374322306, 1.772746704805386e-06],
[1115.3505637801043, 1250.1440195093382, 1417.21404073115, 2809265.192909424, 5151932.1545183025,
6252740.844923852, 9.899845029941671, 0.01491459237885602, 1.6761119331505416e-06],
[1115.3505637801043, 1259.3587163655109, 1417.21404073115, 2809265.192909424, 5151932.1545183025,
6252740.844923852, 10.139251868310934, 0.01491459237885602, 1.7006950945250131e-06],
[1172.1322901571223, 1261.353970231427, 1424.647927352167, 2643981.980613781, 5539495.081817166,
6122658.492254281, 11.426790838912128, 0.014402352374322306, 1.772746704805386e-06],
[1117.1225010723806, 1245.0461992541775, 1408.2360625510769, 2809265.192909424, 5151932.1545183025,
6034517.235726284, 9.899845029941671, 0.014343389674800526, 1.7139568106579685e-06],
[1172.1322901571223, 1261.353970231427, 1424.647927352167, 2643981.980613781, 5539495.081817166,
6122658.492254281, 11.426790838912128, 0.014402711636037064, 1.8287965466133632e-06],
[1106.8161654815144, 1260.5869822935006, 1424.6533094655529, 2769369.5842076726, 4987417.102943326,
6252960.904134171, 11.31993774216143, 0.01508497956653298, 1.675718750508336e-06],
[1106.8161654815144, 1260.5869822935006, 1424.6533094655529, 2756948.118884614, 4982497.032409398,
6316982.864584648, 11.401218941020598, 0.015071570205789586, 1.6571644711735225e-06],
[1100.1408530781362, 1259.20529625876, 1420.7016199249624, 2794529.276377869, 5137466.622582068,
6291895.722338178, 9.923151891580432, 0.01467653963001504, 1.6827081430883913e-06],
[1172.1322901571223, 1272.8969286837373, 1437.557746508848, 2643981.980613781, 5644960.1495587025,
6319984.010862878, 11.608379366203652, 0.0153098728874533, 1.7558263639829205e-06],
[1096.634295467943, 1260.5869822935006, 1424.6533094655529, 2756948.118884614, 4987417.102943326,
6252960.904134171, 11.401218941020598, 0.01508497956653298, 1.6571644711735225e-06],
[1115.3505637801043, 1250.1440195093382, 1417.21404073115, 2809265.192909424, 5151932.1545183025,
6202055.6442852765, 9.899845029941671, 0.01491459237885602, 1.6761119331505416e-06],
[1102.9707247869544, 1260.5869822935006, 1420.5917316831233, 2825130.0490485826, 5151932.1545183025,
6329254.498107326, 9.870483944224635, 0.01508497956653298, 1.6471410354130274e-06],
[1103.1986748240922, 1259.20529625876, 1421.6358662409607, 2794529.276377869, 5138339.579249148,
6317443.2095390335, 9.959965929147502, 0.014876786169789531, 1.6827081430883913e-06],
[1106.8161654815144, 1260.5869822935006, 1428.2938999164598, 2756856.4798341207, 4987417.102943326,
6252960.904134171, 11.324658824884459, 0.01508497956653298, 1.6859236759203733e-06],
[1171.886230946291, 1261.2561027220345, 1424.647927352167, 2674527.9391689897, 5541510.5883321315,
6254834.865253087, 11.602935611820246, 0.015129553230613428, 1.646211033468178e-06],
[1124.3787600097082, 1256.786782905013, 1432.7380531614376, 2704309.8939345954, 5135842.055580591,
6339744.606807769, 10.010198748307836, 0.014604814079047299, 1.64751e-06],
[1172.1322901571223, 1261.353970231427, 1424.647927352167, 2643981.980613781, 5539495.081817166,
6122658.492254281, 11.426790838912128, 0.014416923144892024, 1.746960716329272e-06],
[1100.1408530781362, 1259.20529625876, 1420.7016199249624, 2794529.276377869, 5137466.622582068,
6291895.722338178, 9.959965929147502, 0.014877788107490625, 1.6827081430883913e-06],
[1111.4397194962298, 1252.3352159902076, 1431.2324989413153, 2834053.9104807484, 5073692.871060503,
6205159.784569372, 9.962970738526543, 0.014995216559648368, 1.64441131717817e-06],
[1106.8161654815144, 1260.5869822935006, 1424.6533094655529, 2756948.118884614, 4987417.102943326,
6252960.904134171, 11.401218941020598, 0.01508497956653298, 1.6571644711735225e-06],
[1106.8161654815144, 1260.5869822935006, 1424.6533094655529, 2756948.118884614, 4987417.102943326,
6252960.904134171, 11.31993774216143, 0.01508497956653298, 1.675718750508336e-06],
[1094.9494294625033, 1260.5869822935006, 1420.5917316831233, 2825130.0490485826, 5151932.1545183025,
6328379.705405241, 9.703511954746483, 0.01508497956653298, 1.6477426444753308e-06],
[1106.8161654815144, 1260.5869822935006, 1424.6533094655529, 2756948.118884614, 4987417.102943326,
6252960.904134171, 11.29179270201854, 0.01508497956653298, 1.675718750508336e-06],
[1114.8364994143328, 1276.755042176067, 1430.0554994299264, 2711709.7515290566, 5061252.899416794,
6843779.100678993, 9.962874958185285, 0.014995216559648368, 1.6435718248748259e-06],
[1112.0739142572368, 1252.4221946420748, 1421.4288718354624, 2711709.7515290566, 5177667.749296468,
6205159.784569372, 9.787151112326503, 0.014995216559648368, 1.6865448209421645e-06],
[1123.0873102981457, 1256.786782905013, 1432.7380531614376, 2704309.8939345954, 5135842.055580591,
6339744.606807769, 10.010198748307836, 0.0150714007214576, 1.6742977494042081e-06],
[1172.1322901571223, 1261.2561027220345, 1424.647927352167, 2645497.698072738, | |
'# clequ1Ph3x2x3x3'],
[3, 3, 1, 1, format(angle(scaled_wc('lequ1_3311')), '.6e'), '# clequ1Ph3x3x1x1'],
[3, 3, 1, 2, format(angle(scaled_wc('lequ1_3312')), '.6e'), '# clequ1Ph3x3x1x2'],
[3, 3, 1, 3, format(angle(scaled_wc('lequ1_3313')), '.6e'), '# clequ1Ph3x3x1x3'],
[3, 3, 2, 1, format(angle(scaled_wc('lequ1_3321')), '.6e'), '# clequ1Ph3x3x2x1'],
[3, 3, 2, 2, format(angle(scaled_wc('lequ1_3322')), '.6e'), '# clequ1Ph3x3x2x2'],
[3, 3, 2, 3, format(angle(scaled_wc('lequ1_3323')), '.6e'), '# clequ1Ph3x3x2x3'],
[3, 3, 3, 1, format(angle(scaled_wc('lequ1_3331')), '.6e'), '# clequ1Ph3x3x3x1'],
[3, 3, 3, 2, format(angle(scaled_wc('lequ1_3332')), '.6e'), '# clequ1Ph3x3x3x2'],
[3, 3, 3, 3, format(angle(scaled_wc('lequ1_3333')), '.6e'), '# clequ1Ph3x3x3x3'],
]}
card['Block']['FRBlock8'] = {'values': [
[1, 1, format(abs(scaled_wc('dphi_11'))* lambda_smeft_value**2, '.6e'), '# cdHAbs1x1'],
[1, 2, format(abs(scaled_wc('dphi_12'))* lambda_smeft_value**2, '.6e'), '# cdHAbs1x2'],
[1, 3, format(abs(scaled_wc('dphi_13'))* lambda_smeft_value**2, '.6e'), '# cdHAbs1x3'],
[2, 1, format(abs(scaled_wc('dphi_21'))* lambda_smeft_value**2, '.6e'), '# cdHAbs2x1'],
[2, 2, format(abs(scaled_wc('dphi_22'))* lambda_smeft_value**2, '.6e'), '# cdHAbs2x2'],
[2, 3, format(abs(scaled_wc('dphi_23'))* lambda_smeft_value**2, '.6e'), '# cdHAbs2x3'],
[3, 1, format(abs(scaled_wc('dphi_31'))* lambda_smeft_value**2, '.6e'), '# cdHAbs3x1'],
[3, 2, format(abs(scaled_wc('dphi_32'))* lambda_smeft_value**2, '.6e'), '# cdHAbs3x2'],
[3, 3, format(abs(scaled_wc('dphi_33'))* lambda_smeft_value**2, '.6e'), '# cdHAbs3x3'],
]}
card['Block']['FRBlock81'] = {'values': [
[1, 1, 1, 1, format(abs(scaled_wc('lequ3_1111'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs1x1x1x1'],
[1, 1, 1, 2, format(abs(scaled_wc('lequ3_1112'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs1x1x1x2'],
[1, 1, 1, 3, format(abs(scaled_wc('lequ3_1113'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs1x1x1x3'],
[1, 1, 2, 1, format(abs(scaled_wc('lequ3_1121'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs1x1x2x1'],
[1, 1, 2, 2, format(abs(scaled_wc('lequ3_1122'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs1x1x2x2'],
[1, 1, 2, 3, format(abs(scaled_wc('lequ3_1123'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs1x1x2x3'],
[1, 1, 3, 1, format(abs(scaled_wc('lequ3_1131'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs1x1x3x1'],
[1, 1, 3, 2, format(abs(scaled_wc('lequ3_1132'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs1x1x3x2'],
[1, 1, 3, 3, format(abs(scaled_wc('lequ3_1133'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs1x1x3x3'],
[1, 2, 1, 1, format(abs(scaled_wc('lequ3_1211'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs1x2x1x1'],
[1, 2, 1, 2, format(abs(scaled_wc('lequ3_1212'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs1x2x1x2'],
[1, 2, 1, 3, format(abs(scaled_wc('lequ3_1213'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs1x2x1x3'],
[1, 2, 2, 1, format(abs(scaled_wc('lequ3_1221'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs1x2x2x1'],
[1, 2, 2, 2, format(abs(scaled_wc('lequ3_1222'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs1x2x2x2'],
[1, 2, 2, 3, format(abs(scaled_wc('lequ3_1223'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs1x2x2x3'],
[1, 2, 3, 1, format(abs(scaled_wc('lequ3_1231'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs1x2x3x1'],
[1, 2, 3, 2, format(abs(scaled_wc('lequ3_1232'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs1x2x3x2'],
[1, 2, 3, 3, format(abs(scaled_wc('lequ3_1233'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs1x2x3x3'],
[1, 3, 1, 1, format(abs(scaled_wc('lequ3_1311'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs1x3x1x1'],
[1, 3, 1, 2, format(abs(scaled_wc('lequ3_1312'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs1x3x1x2'],
[1, 3, 1, 3, format(abs(scaled_wc('lequ3_1313'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs1x3x1x3'],
[1, 3, 2, 1, format(abs(scaled_wc('lequ3_1321'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs1x3x2x1'],
[1, 3, 2, 2, format(abs(scaled_wc('lequ3_1322'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs1x3x2x2'],
[1, 3, 2, 3, format(abs(scaled_wc('lequ3_1323'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs1x3x2x3'],
[1, 3, 3, 1, format(abs(scaled_wc('lequ3_1331'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs1x3x3x1'],
[1, 3, 3, 2, format(abs(scaled_wc('lequ3_1332'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs1x3x3x2'],
[1, 3, 3, 3, format(abs(scaled_wc('lequ3_1333'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs1x3x3x3'],
[2, 1, 1, 1, format(abs(scaled_wc('lequ3_2111'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs2x1x1x1'],
[2, 1, 1, 2, format(abs(scaled_wc('lequ3_2112'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs2x1x1x2'],
[2, 1, 1, 3, format(abs(scaled_wc('lequ3_2113'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs2x1x1x3'],
[2, 1, 2, 1, format(abs(scaled_wc('lequ3_2121'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs2x1x2x1'],
[2, 1, 2, 2, format(abs(scaled_wc('lequ3_2122'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs2x1x2x2'],
[2, 1, 2, 3, format(abs(scaled_wc('lequ3_2123'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs2x1x2x3'],
[2, 1, 3, 1, format(abs(scaled_wc('lequ3_2131'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs2x1x3x1'],
[2, 1, 3, 2, format(abs(scaled_wc('lequ3_2132'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs2x1x3x2'],
[2, 1, 3, 3, format(abs(scaled_wc('lequ3_2133'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs2x1x3x3'],
[2, 2, 1, 1, format(abs(scaled_wc('lequ3_2211'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs2x2x1x1'],
[2, 2, 1, 2, format(abs(scaled_wc('lequ3_2212'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs2x2x1x2'],
[2, 2, 1, 3, format(abs(scaled_wc('lequ3_2213'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs2x2x1x3'],
[2, 2, 2, 1, format(abs(scaled_wc('lequ3_2221'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs2x2x2x1'],
[2, 2, 2, 2, format(abs(scaled_wc('lequ3_2222'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs2x2x2x2'],
[2, 2, 2, 3, format(abs(scaled_wc('lequ3_2223'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs2x2x2x3'],
[2, 2, 3, 1, format(abs(scaled_wc('lequ3_2231'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs2x2x3x1'],
[2, 2, 3, 2, format(abs(scaled_wc('lequ3_2232'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs2x2x3x2'],
[2, 2, 3, 3, format(abs(scaled_wc('lequ3_2233'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs2x2x3x3'],
[2, 3, 1, 1, format(abs(scaled_wc('lequ3_2311'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs2x3x1x1'],
[2, 3, 1, 2, format(abs(scaled_wc('lequ3_2312'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs2x3x1x2'],
[2, 3, 1, 3, format(abs(scaled_wc('lequ3_2313'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs2x3x1x3'],
[2, 3, 2, 1, format(abs(scaled_wc('lequ3_2321'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs2x3x2x1'],
[2, 3, 2, 2, format(abs(scaled_wc('lequ3_2322'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs2x3x2x2'],
[2, 3, 2, 3, format(abs(scaled_wc('lequ3_2323'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs2x3x2x3'],
[2, 3, 3, 1, format(abs(scaled_wc('lequ3_2331'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs2x3x3x1'],
[2, 3, 3, 2, format(abs(scaled_wc('lequ3_2332'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs2x3x3x2'],
[2, 3, 3, 3, format(abs(scaled_wc('lequ3_2333'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs2x3x3x3'],
[3, 1, 1, 1, format(abs(scaled_wc('lequ3_3111'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs3x1x1x1'],
[3, 1, 1, 2, format(abs(scaled_wc('lequ3_3112'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs3x1x1x2'],
[3, 1, 1, 3, format(abs(scaled_wc('lequ3_3113'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs3x1x1x3'],
[3, 1, 2, 1, format(abs(scaled_wc('lequ3_3121'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs3x1x2x1'],
[3, 1, 2, 2, format(abs(scaled_wc('lequ3_3122'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs3x1x2x2'],
[3, 1, 2, 3, format(abs(scaled_wc('lequ3_3123'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs3x1x2x3'],
[3, 1, 3, 1, format(abs(scaled_wc('lequ3_3131'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs3x1x3x1'],
[3, 1, 3, 2, format(abs(scaled_wc('lequ3_3132'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs3x1x3x2'],
[3, 1, 3, 3, format(abs(scaled_wc('lequ3_3133'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs3x1x3x3'],
[3, 2, 1, 1, format(abs(scaled_wc('lequ3_3211'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs3x2x1x1'],
[3, 2, 1, 2, format(abs(scaled_wc('lequ3_3212'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs3x2x1x2'],
[3, 2, 1, 3, format(abs(scaled_wc('lequ3_3213'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs3x2x1x3'],
[3, 2, 2, 1, format(abs(scaled_wc('lequ3_3221'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs3x2x2x1'],
[3, 2, 2, 2, format(abs(scaled_wc('lequ3_3222'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs3x2x2x2'],
[3, 2, 2, 3, format(abs(scaled_wc('lequ3_3223'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs3x2x2x3'],
[3, 2, 3, 1, format(abs(scaled_wc('lequ3_3231'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs3x2x3x1'],
[3, 2, 3, 2, format(abs(scaled_wc('lequ3_3232'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs3x2x3x2'],
[3, 2, 3, 3, format(abs(scaled_wc('lequ3_3233'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs3x2x3x3'],
[3, 3, 1, 1, format(abs(scaled_wc('lequ3_3311'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs3x3x1x1'],
[3, 3, 1, 2, format(abs(scaled_wc('lequ3_3312'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs3x3x1x2'],
[3, 3, 1, 3, format(abs(scaled_wc('lequ3_3313'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs3x3x1x3'],
[3, 3, 2, 1, format(abs(scaled_wc('lequ3_3321'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs3x3x2x1'],
[3, 3, 2, 2, format(abs(scaled_wc('lequ3_3322'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs3x3x2x2'],
[3, 3, 2, 3, format(abs(scaled_wc('lequ3_3323'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs3x3x2x3'],
[3, 3, 3, 1, format(abs(scaled_wc('lequ3_3331'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs3x3x3x1'],
[3, 3, 3, 2, format(abs(scaled_wc('lequ3_3332'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs3x3x3x2'],
[3, 3, 3, 3, format(abs(scaled_wc('lequ3_3333'))* lambda_smeft_value**2, '.6e'), '# cLeQu3Abs3x3x3x3'],
]}
card['Block']['FRBlock82'] = {'values': [
[1, 1, 1, 1, format(angle(scaled_wc('lequ3_1111')), '.6e'), '# cLeQu3Ph1x1x1x1'],
[1, 1, 1, 2, format(angle(scaled_wc('lequ3_1112')), '.6e'), '# cLeQu3Ph1x1x1x2'],
[1, 1, 1, 3, format(angle(scaled_wc('lequ3_1113')), '.6e'), '# cLeQu3Ph1x1x1x3'],
[1, 1, 2, 1, format(angle(scaled_wc('lequ3_1121')), '.6e'), '# cLeQu3Ph1x1x2x1'],
[1, 1, 2, 2, format(angle(scaled_wc('lequ3_1122')), '.6e'), '# cLeQu3Ph1x1x2x2'],
[1, 1, 2, 3, format(angle(scaled_wc('lequ3_1123')), '.6e'), '# cLeQu3Ph1x1x2x3'],
[1, 1, 3, 1, format(angle(scaled_wc('lequ3_1131')), '.6e'), '# cLeQu3Ph1x1x3x1'],
[1, 1, 3, 2, format(angle(scaled_wc('lequ3_1132')), '.6e'), '# cLeQu3Ph1x1x3x2'],
[1, 1, 3, 3, format(angle(scaled_wc('lequ3_1133')), '.6e'), '# cLeQu3Ph1x1x3x3'],
[1, 2, 1, 1, format(angle(scaled_wc('lequ3_1211')), '.6e'), '# cLeQu3Ph1x2x1x1'],
[1, 2, 1, 2, format(angle(scaled_wc('lequ3_1212')), '.6e'), '# cLeQu3Ph1x2x1x2'],
[1, 2, 1, 3, format(angle(scaled_wc('lequ3_1213')), '.6e'), '# cLeQu3Ph1x2x1x3'],
[1, 2, 2, 1, format(angle(scaled_wc('lequ3_1221')), '.6e'), '# cLeQu3Ph1x2x2x1'],
[1, 2, 2, 2, format(angle(scaled_wc('lequ3_1222')), '.6e'), '# cLeQu3Ph1x2x2x2'],
[1, 2, 2, 3, format(angle(scaled_wc('lequ3_1223')), '.6e'), '# cLeQu3Ph1x2x2x3'],
[1, 2, 3, 1, format(angle(scaled_wc('lequ3_1231')), '.6e'), '# cLeQu3Ph1x2x3x1'],
[1, 2, 3, 2, format(angle(scaled_wc('lequ3_1232')), '.6e'), '# cLeQu3Ph1x2x3x2'],
[1, 2, 3, 3, format(angle(scaled_wc('lequ3_1233')), '.6e'), '# cLeQu3Ph1x2x3x3'],
[1, 3, 1, 1, format(angle(scaled_wc('lequ3_1311')), '.6e'), '# cLeQu3Ph1x3x1x1'],
[1, 3, 1, 2, format(angle(scaled_wc('lequ3_1312')), '.6e'), '# cLeQu3Ph1x3x1x2'],
[1, 3, 1, 3, format(angle(scaled_wc('lequ3_1313')), '.6e'), '# cLeQu3Ph1x3x1x3'],
[1, 3, 2, 1, format(angle(scaled_wc('lequ3_1321')), '.6e'), '# cLeQu3Ph1x3x2x1'],
[1, 3, 2, 2, format(angle(scaled_wc('lequ3_1322')), '.6e'), '# cLeQu3Ph1x3x2x2'],
[1, 3, 2, 3, format(angle(scaled_wc('lequ3_1323')), '.6e'), '# cLeQu3Ph1x3x2x3'],
[1, 3, 3, 1, format(angle(scaled_wc('lequ3_1331')), '.6e'), '# cLeQu3Ph1x3x3x1'],
[1, 3, 3, 2, format(angle(scaled_wc('lequ3_1332')), '.6e'), '# cLeQu3Ph1x3x3x2'],
[1, 3, 3, 3, format(angle(scaled_wc('lequ3_1333')), '.6e'), '# cLeQu3Ph1x3x3x3'],
[2, 1, 1, 1, format(angle(scaled_wc('lequ3_2111')), '.6e'), '# cLeQu3Ph2x1x1x1'],
[2, 1, 1, 2, format(angle(scaled_wc('lequ3_2112')), '.6e'), '# cLeQu3Ph2x1x1x2'],
[2, 1, 1, 3, format(angle(scaled_wc('lequ3_2113')), '.6e'), '# cLeQu3Ph2x1x1x3'],
[2, 1, 2, 1, format(angle(scaled_wc('lequ3_2121')), '.6e'), '# cLeQu3Ph2x1x2x1'],
[2, 1, 2, 2, format(angle(scaled_wc('lequ3_2122')), '.6e'), '# cLeQu3Ph2x1x2x2'],
[2, 1, 2, 3, format(angle(scaled_wc('lequ3_2123')), '.6e'), '# cLeQu3Ph2x1x2x3'],
[2, 1, 3, 1, format(angle(scaled_wc('lequ3_2131')), '.6e'), '# cLeQu3Ph2x1x3x1'],
[2, 1, 3, 2, format(angle(scaled_wc('lequ3_2132')), '.6e'), '# cLeQu3Ph2x1x3x2'],
[2, 1, 3, 3, format(angle(scaled_wc('lequ3_2133')), '.6e'), '# cLeQu3Ph2x1x3x3'],
[2, 2, 1, 1, format(angle(scaled_wc('lequ3_2211')), '.6e'), '# cLeQu3Ph2x2x1x1'],
[2, 2, 1, 2, format(angle(scaled_wc('lequ3_2212')), '.6e'), '# cLeQu3Ph2x2x1x2'],
[2, 2, 1, 3, format(angle(scaled_wc('lequ3_2213')), '.6e'), '# cLeQu3Ph2x2x1x3'],
[2, 2, 2, 1, format(angle(scaled_wc('lequ3_2221')), '.6e'), '# cLeQu3Ph2x2x2x1'],
[2, 2, 2, 2, format(angle(scaled_wc('lequ3_2222')), '.6e'), '# cLeQu3Ph2x2x2x2'],
[2, 2, 2, 3, format(angle(scaled_wc('lequ3_2223')), '.6e'), '# cLeQu3Ph2x2x2x3'],
[2, 2, 3, 1, format(angle(scaled_wc('lequ3_2231')), '.6e'), '# cLeQu3Ph2x2x3x1'],
[2, 2, 3, 2, format(angle(scaled_wc('lequ3_2232')), '.6e'), '# cLeQu3Ph2x2x3x2'],
[2, 2, 3, 3, format(angle(scaled_wc('lequ3_2233')), '.6e'), '# cLeQu3Ph2x2x3x3'],
[2, 3, 1, 1, format(angle(scaled_wc('lequ3_2311')), '.6e'), '# cLeQu3Ph2x3x1x1'],
[2, 3, 1, 2, format(angle(scaled_wc('lequ3_2312')), '.6e'), '# cLeQu3Ph2x3x1x2'],
[2, 3, 1, 3, format(angle(scaled_wc('lequ3_2313')), '.6e'), '# cLeQu3Ph2x3x1x3'],
[2, 3, 2, 1, format(angle(scaled_wc('lequ3_2321')), '.6e'), '# cLeQu3Ph2x3x2x1'],
[2, 3, 2, 2, format(angle(scaled_wc('lequ3_2322')), '.6e'), '# | |
#!/usr/bin/env python
# vaccination_level.py
# 1. Capture the request using the developer tools in a Chromium-based browser
# 2. Convert it to Python with https://curl.trillworks.com/
import sys
import requests
import sqlite3
import time
import hashlib
import signal
import argparse
from datetime import datetime, date, timedelta
import math
headers = {
'sec-ch-ua': '" Not;A Brand";v="99", "Microsoft Edge";v="91", "Chromium";v="91"',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Referer': 'https://www.gov.pl/web/szczepienia-gmin',
'DNT': '1',
'X-Requested-With': 'XMLHttpRequest',
'sec-ch-ua-mobile': '?0',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.106 Safari/537.36 Edg/91.0.864.53',
}
params = (
('segment', 'A,B,C'),
)
debug_logs = True
db_name = 'vaccination_level.db'
repl = str.maketrans(
"ąćęłńóśźżĄĆĘŁŃÓŚŹŻ",
"acelnoszzACELNOSZZ"
)
def get_json():
timestamp = time.time()
response = requests.get('https://www.gov.pl/api/data/covid-vaccination-contest/results-details', headers=headers, params=params)
    if not response.ok:
return None, None, None
hash_md5 = hashlib.md5(response.text.encode())
return response.json(), int(timestamp), hash_md5.hexdigest()
def timestamp_to_utcdatetime(timestamp):
return datetime.utcfromtimestamp(timestamp)
def nice_date(timestamp: int):
return timestamp_to_utcdatetime(timestamp).strftime('%Y/%m/%d')
class VoivodeshipVaccineData:
def __init__(self, timestamp: int, voivodeship: str, population: int, full_vaccinated_amount: int):
self.timestamp = timestamp
self.voivodeship = voivodeship
self.population = population
self.full_vaccinated_amount = full_vaccinated_amount
self.full_vaccinated_percent = self.full_vaccinated_amount / self.population
def update(self, population: int, full_vaccinated_amount: int):
self.population += population
self.full_vaccinated_amount += full_vaccinated_amount
self.full_vaccinated_percent = self.full_vaccinated_amount / self.population
def percent_string(self):
return '{:.4f}%'.format(self.full_vaccinated_percent * 100)
class CommunityVaccineData:
def __init__(self, json_entry):
self.voivodeship = json_entry['voivodeship'].translate(repl)
self.county = json_entry['county'].translate(repl)
self.community = json_entry['community'].translate(repl)
self.community_type = json_entry['community_type']
self.teryt = json_entry['teryt_code']
self.population = 0
self.full_vaccinated_amount = 0
self.full_vaccinated_percent = 0
self.update(json_entry)
def update(self, json_entry):
self.population += json_entry['population']
self.full_vaccinated_amount += json_entry['full_vaccinated_amount']
self.full_vaccinated_percent = self.full_vaccinated_amount / self.population
class PlotData:
def __init__(self):
self.name = ""
self.x = []
self.y = []
run = True
def signal_handler(sig, frame):
print('Ctrl-C caught - closing')
global run
run = False
def update_db():
json_resp, timestamp, hash_md5 = get_json()
if json_resp:
voivodeships = {}
communities = []
for entry in json_resp:
v = entry['voivodeship'].translate(repl)
if v in voivodeships:
voivodeships[v].update(entry['population'], entry['full_vaccinated_amount'])
else:
voivodeships[v] = VoivodeshipVaccineData(timestamp, v, entry['population'], entry['full_vaccinated_amount'])
communities.append(CommunityVaccineData(entry))
create_db()
if hash_exists(hash_md5):
print(f'{timestamp} - nothing to be done - data already in db')
return -1
else:
update_voivodeships(timestamp, voivodeships, hash_md5)
update_communities(timestamp, communities)
return 0
def update(args):
if args.continuous is False:
return update_db()
else:
signal.signal(signal.SIGINT, signal_handler)
while run:
update_db()
print('sleep')
for i in range(0, 120):
if run is False:
break
# print(f'sleep {i+1}/120')
time.sleep(30)
print('bye')
return 0
headers_table = [
'WOJEWODZTWO'
]
from pathlib import Path
herd_immunity = 70.0
def when_herd_immunity(start, end):
delta_percent = (end.full_vaccinated_percent * 100) - (start.full_vaccinated_percent * 100)
end_date = timestamp_to_utcdatetime(end.timestamp).date()
start_date = timestamp_to_utcdatetime(start.timestamp).date()
delta_days = (end_date - start_date).days
daily_increase_average = delta_percent / delta_days
#print(f'{end.voivodeship} start: {start_date}, end: {end_date}, days: {delta_days} delta_percent: {delta_percent}% daily_incr: {daily_increase_average}%')
percent_required = herd_immunity - (end.full_vaccinated_percent * 100)
days_to_herd_immunity = int(math.ceil(percent_required / daily_increase_average))
herd_immunity_date = timestamp_to_utcdatetime(end.timestamp).date() + timedelta(days=days_to_herd_immunity)
return daily_increase_average, herd_immunity_date, days_to_herd_immunity
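# Worked example with hypothetical numbers: going from 30.0% to 40.0% fully
# vaccinated over 20 days gives daily_increase_average = 0.5%/day; reaching the
# 70% herd-immunity threshold then needs ceil((70.0 - 40.0) / 0.5) = 60 more days.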
def stats(args):
plot_dates = []
plot_data = []
output=sys.stdout
if args.output:
path = Path(args.output)
        if not path.exists() or path.is_file():
output = path.open('w')
timestamps = get_timestamps()
voivodeships = get_voivodeships()
v_len = len(max(voivodeships, key=len))
d_len = len(nice_date(0))
v_string = '{:' + str(v_len) + 's} | '
t_string = '{:>' + str(d_len) + 's} | '
herd_string = t_string + '{:.4f}%/dzien | {:>3s} dni | {:>' + str(d_len) + 's}'
herd_immunity_lines = []
stats_lines = []
desc_lines = []
# create table header
header = v_string.format(headers_table[0])
desc_lines.append(header)
desc_len = len(header)
header = ''
skip_idx = []
for idx in range(0, len(timestamps)):
timestamp = timestamps[idx]
new_date = timestamp_to_utcdatetime(timestamp).date()
if len(plot_dates) > 0 and plot_dates[-1] == new_date:
skip_idx.append(idx-1)
continue
nice_timestamp = nice_date(timestamp)
header += t_string.format(nice_timestamp)
plot_dates.append(new_date)
stats_len = len(header)
stats_lines.append(header)
header = ('{:>' + str(d_len) + 's} | KIEDY ODPORNOSC STADNA {:.0f}% ').format(plot_dates[-1].strftime('%Y/%m/%d'), herd_immunity)
herd_immunity_len = len(header)
herd_immunity_lines.append(header)
header = ''
#print(header, file=output)
desc_line_separator = '-' * desc_len
stats_line_separator = '-' * stats_len
herd_immunity_line_separator = '-' * herd_immunity_len
desc_lines.append(desc_line_separator)
stats_lines.append(stats_line_separator)
    herd_immunity_lines.append(herd_immunity_line_separator)
    # here it is assumed that no new voivodeships will be created ;) and that all of them always have data
off = 2 # header + separator
if len(voivodeships) > 0:
plot_entry = PlotData()
plot_entry.name = voivodeships[0]
plot_entry.x = plot_dates
master_data = get_voivodeship_data(voivodeships[0])
out = v_string.format(voivodeships[0])
desc_lines.append(out)
herd_immunity_lines.append('')
stats_lines.append('')
last_idx = 0
for idx in range(0, len(master_data)):
if idx in skip_idx:
continue
last_idx = idx
stats_lines[0+off] += t_string.format(master_data[idx].percent_string())
plot_entry.y.append(master_data[idx].full_vaccinated_percent)
daily_increase_average, herd_immunity_date, days_to_herd_immunity = when_herd_immunity(master_data[0], master_data[last_idx])
herd_immunity_lines[0+off] += herd_string.format(master_data[last_idx].percent_string(), daily_increase_average, str(days_to_herd_immunity), herd_immunity_date.strftime('%Y/%m/%d'))
#print(out, file=output)
plot_data.append(plot_entry)
for i in range(1, len(voivodeships)):
plot_entry = PlotData()
plot_entry.name = voivodeships[i]
plot_entry.x = plot_dates
data = get_voivodeship_data(voivodeships[i])
out = v_string.format(voivodeships[i])
desc_lines.append(out)
herd_immunity_lines.append('')
stats_lines.append('')
last_idx = 0
for j in range(0, len(data)):
if j in skip_idx:
continue
last_idx = j
stats_lines[i+off] += t_string.format(data[j].percent_string())
plot_entry.y.append(data[j].full_vaccinated_percent)
master_data[j].update(data[j].population, data[j].full_vaccinated_amount)
daily_increase_average, herd_immunity_date, days_to_herd_immunity = when_herd_immunity(data[0], data[last_idx])
herd_immunity_lines[i+off] += herd_string.format(data[last_idx].percent_string(), daily_increase_average, str(days_to_herd_immunity), herd_immunity_date.strftime('%Y/%m/%d'))
#print(out, file=output)
plot_data.append(plot_entry)
desc_lines.append(desc_line_separator)
stats_lines.append(stats_line_separator)
herd_immunity_lines.append(herd_immunity_line_separator)
#print(line_separator, file=output)
desc_lines.append(v_string.format('POLSKA'))
stats_lines.append('')
herd_immunity_lines.append('')
plot_entry = PlotData()
plot_entry.name = 'POLSKA'
plot_entry.x = plot_dates
last_idx = 0
for idx in range(0, len(master_data)):
if idx in skip_idx:
continue
last_idx = idx
stats_lines[-1] += t_string.format(master_data[idx].percent_string())
plot_entry.y.append(master_data[idx].full_vaccinated_percent)
daily_increase_average, herd_immunity_date, days_to_herd_immunity = when_herd_immunity(master_data[0], master_data[last_idx])
herd_immunity_lines[-1] += herd_string.format(master_data[last_idx].percent_string(), daily_increase_average, str(days_to_herd_immunity), herd_immunity_date.strftime('%Y/%m/%d'))
plot_data.insert(0, plot_entry)
#print(out, file=output)
print('# Opracowanie na podstawie danych z https://www.gov.pl/web/szczepienia-gmin', file=output)
print('', file=output)
chart_list = generate_chart('level', f'({plot_dates[-1]}) Procent zaszczepionych w Polsce', plot_data)
for chart in chart_list:
print(chart, file=output)
if args.md:
print('```', file=output)
for i in range(0, len(herd_immunity_lines)):
print(f"{desc_lines[i]}{herd_immunity_lines[i]}", file=output)
if args.md:
print('```', file=output)
print('', file=output)
if args.md:
print('```', file=output)
for i in range(0, len(stats_lines)):
print(f"{desc_lines[i]}{stats_lines[i]}", file=output)
print('', file=output)
    # use last_idx from the final loop above rather than the leaked inner-loop variable j
    print(f'zaszczepieni/populacja:\n{master_data[last_idx].full_vaccinated_amount}/{master_data[last_idx].population}', file=output)
if args.md:
print('```', file=output)
if output != sys.stdout:
output.close()
return 0
def main():
ap = argparse.ArgumentParser()
ap.add_argument('-v', '--verbose', action='count', help='verbose output')
sub = ap.add_subparsers()
update_ap = sub.add_parser('update', help='updates db')
update_ap.add_argument('-c', '--continuous', action='store_true', help='runs update periodically - ctrl-c to stop')
update_ap.set_defaults(func=update)
stats_ap = sub.add_parser('stats', help='prints stats')
stats_ap.add_argument('-m', '--md', action='store_true', help='adds md headers')
stats_ap.add_argument('-o', '--output', type=str, default=None, help='output stats to a file')
stats_ap.set_defaults(func=stats)
args = ap.parse_args()
return args.func(args)
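# Illustrative CLI usage based on the parser above (the script name is
# hypothetical; only the flags and subcommands come from the argparse setup):
#
#   python vaccines.py update -c            # poll and update the local db repeatedly
#   python vaccines.py stats -m -o stats.md # markdown-wrapped stats written to a file
#   python vaccines.py stats                # plain stats to stdout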
def create_db():
conn = sqlite3.connect(db_name)
conn.execute('''CREATE TABLE IF NOT EXISTS Voivodeships
(time INTEGER,
voivodeship TEXT,
population INTEGER DEFAULT 0,
full_vaccinated_amount INTEGER DEFAULT 0
);''')
conn.execute('''CREATE TABLE IF NOT EXISTS Timestamps
(time INTEGER PRIMARY KEY ASC,
hash_md5 TEXT);''')
conn.execute('''CREATE TABLE IF NOT EXISTS Communities_info
(id INTEGER PRIMARY KEY AUTOINCREMENT,
county TEXT,
community TEXT,
voivodeship TEXT,
community_type INTEGER,
teryt TEXT
);''')
conn.execute('''CREATE TABLE IF NOT EXISTS Communities
(time INTEGER,
id INTEGER,
population INTEGER,
full_vaccinated_amount INTEGER,
PRIMARY KEY (time, id));''')
conn.commit()
conn.close()
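# Illustrative query sketch: the schema above keys every snapshot by the scrape
# timestamp, so historical per-voivodeship data can be read back with a join on
# the 'time' column. Table and column names come from the CREATE TABLE statements
# above; the query itself is only an example:
#
#   SELECT v.time, v.voivodeship, v.full_vaccinated_amount, v.population
#   FROM Voivodeships v
#   JOIN Timestamps t ON v.time = t.time
#   ORDER BY v.time ASC;
#
# get_voivodeship_data() below issues a simpler, per-voivodeship variant of this.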
def update_communities(timestamp: int, communities):
conn = sqlite3.connect(db_name)
cursor = conn.cursor()
print(f'{timestamp}')
    # Update Communities_info table - should be done once only
for v in communities:
p = (v.county,v.community,v.voivodeship,v.community_type,v.teryt)
cursor.execute('REPLACE INTO Communities_info (county,community, voivodeship,community_type,teryt) VALUES (?,?,?,?,?)', p)
conn.commit()
for v in communities:
cursor.execute("SELECT id FROM Communities_info WHERE teryt=:TERYT", {'TERYT': v.teryt})
result = cursor.fetchone()
#print(f'{result} - {v.voivodeship} {v.county} {v.community}')
p = (timestamp, result[0], v.population, v.full_vaccinated_amount)
cursor.execute("INSERT INTO Communities (time,id,population,full_vaccinated_amount) VALUES (?,?,?,?)", p)
conn.commit()
    print(f'{timestamp} - communities - insert done')
conn.close()
def hash_exists(hash_md5):
conn = sqlite3.connect(db_name)
cursor = conn.cursor()
cursor.execute("SELECT time FROM Timestamps WHERE hash_md5=:NAME", {'NAME': hash_md5})
result = cursor.fetchone()
if result is None:
conn.close()
return False
print(f'{hash_md5} - exists with timestamp {result[0]}')
conn.close()
return True
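# Illustrative sketch of how these helpers fit together: the update path (not
# shown in this excerpt) would hash the downloaded snapshot and skip re-inserting
# data that is already stored. The hashlib import and the call pattern below are
# assumptions based only on the signatures of hash_exists()/update_voivodeships():
#
#   import hashlib
#   digest = hashlib.md5(raw_csv_bytes).hexdigest()
#   if not hash_exists(digest):
#       update_voivodeships(timestamp, voivodeships, digest)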
def update_voivodeships(timestamp, voivodeships, hash_md5):
conn = sqlite3.connect(db_name)
cursor = conn.cursor()
p = (timestamp,hash_md5)
cursor.execute('REPLACE INTO Timestamps (time,hash_md5) VALUES (?,?)', p)
for key, v in voivodeships.items():
print(f'{v.voivodeship}: {v.full_vaccinated_amount}/{v.population} = {v.full_vaccinated_percent * 100}%')
p = (timestamp, v.voivodeship, v.population, v.full_vaccinated_amount)
cursor.execute('REPLACE INTO Voivodeships (time,voivodeship,population,full_vaccinated_amount) VALUES (?,?,?,?)', p)
conn.commit()
    print(f'{timestamp} - voivodeships - insert done')
conn.close()
def get_voivodeships():
conn = sqlite3.connect(db_name)
cursor = conn.cursor()
cursor.execute('SELECT DISTINCT voivodeship FROM Voivodeships ORDER BY voivodeship')
results = cursor.fetchall()
out = list(map(lambda x: x[0], results))
conn.close()
return out
def get_timestamps():
conn = sqlite3.connect(db_name)
cursor = conn.cursor()
cursor.execute('SELECT DISTINCT time FROM Timestamps ORDER BY time ASC')
results = cursor.fetchall()
out = list(map(lambda x: x[0], results))
conn.close()
return out
def get_voivodeship_data(voivodeship: str):
out = []
conn = sqlite3.connect(db_name)
cursor = conn.cursor()
cursor.execute('SELECT time,voivodeship,population,full_vaccinated_amount FROM Voivodeships WHERE voivodeship=:NAME ORDER BY time ASC', {'NAME': voivodeship})
entry = cursor.fetchone()
while entry:
out.append(VoivodeshipVaccineData(timestamp=entry[0], voivodeship=entry[1], population=entry[2], full_vaccinated_amount=entry[3]))
entry = cursor.fetchone()
conn.close()
return out
import plotly.graph_objects as go
from lxml import etree
def generate_chart(filename: str, description: str, charts_data: "list[PlotData]"):
output = []
chart_dir_name = "charts"
chart_dir_path = Path(chart_dir_name)
try:
chart_dir_path.mkdir(parents=True, exist_ok=True)
except FileExistsError as ex:
print(f'{ex}', file=sys.stderr)
return output
line_styles = [
None,
'dot',
'dash',
'dashdot'
]
color_count = 10
fig = go.Figure()
fig.update_yaxes(tickformat="%")
# https://plotly.com/python/reference/#layout-xaxis-nticks
    # If the axis `type` is "date", then you must convert the time to milliseconds. For example, to set the interval between ticks to one day, set `dtick`
# -*- coding: UTF-8; indent-tabs-mode:nil; tab-width:4 -*-
# This file is part of DITA DTD Generator.
#
# Copyright 2009 <NAME> <http://www.elovirta.com/>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ditagen.dita
from ditagen.dtdgen import Particle as Particle
from ditagen.dtdgen import Choice as Choice
from ditagen.dtdgen import Name as Name
from ditagen.dtdgen import Seq as Seq
from ditagen.dtdgen import Attribute as Attribute
from ditagen.dtdgen import Param as Param
from ditagen.dtdgen import ParameterEntity as ParameterEntity
# Elements
#####################################################################
OPTIONAL = Particle.Occurrences.OPTIONAL
ZERO_OR_MORE = Particle.Occurrences.ZERO_OR_MORE
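# Note: these occurrence constants correspond to the usual DTD occurrence
# indicators -- OPTIONAL maps to "?" and ZERO_OR_MORE to "*" -- so, for example,
# Choice(ParameterEntity("prolog"), OPTIONAL) in the models below is expected to
# render as the "(%prolog;)?" fragment of a content model. The exact serialization
# is handled by ditagen.dtdgen and is assumed here rather than shown in this file.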
class TopicElement(ditagen.dita.DitaElement):
"""Topic element."""
name = u"topic"
cls = u"- topic/topic "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("body"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class ConceptElement(ditagen.dita.DitaElement):
"""Concept element."""
name = u"concept"
cls = u"- topic/topic concept/concept "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("conbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class TaskElement(ditagen.dita.DitaElement):
"""Task element."""
name = u"task"
cls = u"- topic/topic task/task "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("taskbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class ReferenceElement(ditagen.dita.DitaElement):
"""Reference element."""
name = u"reference"
cls = u"- topic/topic reference/reference "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("refbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class GlossentryElement(ditagen.dita.DitaElement):
"""Glossary entry element."""
name = u"glossentry"
cls = u"- topic/topic concept/concept glossentry/glossentry "
model = Seq([
Choice(ParameterEntity("glossterm")),
Choice(ParameterEntity("glossdef"), OPTIONAL),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("glossBody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class GlossgroupElement(ditagen.dita.DitaElement):
"""Glossary group element."""
name = u"glossgroup"
cls = u"- topic/topic concept/concept glossgroup/glossgroup "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class LearningBaseElement(ditagen.dita.DitaElement):
"""Learning Base element."""
name = u"learningBase"
cls = u"- topic/topic learningBase/learningBase "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("learningBasebody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class LearningAssessmentElement(ditagen.dita.DitaElement):
"""Learning Assessment element."""
name = u"learningAssessment"
cls = u"- topic/topic learningBase/learningBase learningAssessment/learningAssessment "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("learningAssessmentbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED")
]
class LearningOverviewElement(ditagen.dita.DitaElement):
"""Learning Overview element."""
name = u"learningOverview"
cls = u"- topic/topic learningBase/learningBase learningOverview/learningOverview "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("learningOverviewbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID" ,"#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED")
]
class LearningPlanElement(ditagen.dita.DitaElement):
"""Learning Plan element."""
name = u"learningPlan"
cls = u"- topic/topic learningBase/learningBase learningPlan/learningPlan "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("learningPlanbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID" ,"#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED")
]
class LearningSummaryElement(ditagen.dita.DitaElement):
"""Learning Summary element."""
name = u"learningSummary"
cls = u"- topic/topic learningBase/learningBase learningSummary/learningSummary "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("learningSummarybody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID" ,"#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED")
]
class LearningContentElement(ditagen.dita.DitaElement):
"""Learning Content element."""
name = u"learningContent"
cls = u"- topic/topic learningBase/learningBase learningContent/learningContent "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("learningContentbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID" ,"#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED")
]
class SubjectSchemeElement(ditagen.dita.DitaElement):
"""Subject scheme element."""
name = u"subjectScheme"
cls = u"- map/map subjectScheme/subjectScheme "
model = Seq([
Choice(ParameterEntity("title"), OPTIONAL),
Choice(ParameterEntity("topicmeta"), OPTIONAL),
Choice([
ParameterEntity("anchor"),
ParameterEntity("data.elements.incl"),
ParameterEntity("enumerationdef"),
ParameterEntity("hasInstance"),
ParameterEntity("hasKind"),
ParameterEntity("hasNarrower"),
ParameterEntity("hasPart"),
ParameterEntity("hasRelated"),
ParameterEntity("navref"),
ParameterEntity("relatedSubjects"),
ParameterEntity("reltable"),
ParameterEntity("schemeref"),
ParameterEntity("subjectdef"),
ParameterEntity("subjectHead"),
ParameterEntity("subjectRelTable"),
ParameterEntity("topicref")
], ZERO_OR_MORE)
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
Attribute("anchorref", "CDATA", "#IMPLIED"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
ParameterEntity("localization-atts"),
ParameterEntity("topicref-atts"),
ParameterEntity("select-atts")
]
class MapElement(ditagen.dita.DitaElement):
"""Map element."""
name = u"map"
cls = u"- map/map "
model = Seq([
Choice(ParameterEntity("title"), OPTIONAL),
Choice(ParameterEntity("topicmeta"), OPTIONAL),
Choice([
ParameterEntity("anchor"),
ParameterEntity("data.elements.incl"),
ParameterEntity("navref"),
ParameterEntity("reltable"),
ParameterEntity("topicref")
], ZERO_OR_MORE)
])
attrs = [
Attribute("title", "CDATA", "#IMPLIED"),
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
Attribute("anchorref", "CDATA", "#IMPLIED"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
ParameterEntity("localization-atts"),
ParameterEntity("topicref-atts"),
ParameterEntity("select-atts")
]
class BookMapElement(ditagen.dita.DitaElement):
"""BookMap element."""
name = u"bookmap"
cls = u"- map/map bookmap/bookmap "
model = Seq([
Choice([Choice(ParameterEntity("title")), Choice(ParameterEntity("booktitle"))], OPTIONAL),
Choice(ParameterEntity("bookmeta"), OPTIONAL),
Choice(ParameterEntity("frontmatter"), OPTIONAL),
Choice(ParameterEntity("chapter"), ZERO_OR_MORE),
Choice(ParameterEntity("part"), ZERO_OR_MORE),
Choice([Choice(ParameterEntity("appendices"), OPTIONAL), Choice(ParameterEntity("appendix"), ZERO_OR_MORE)]),
Choice(ParameterEntity("backmatter"), OPTIONAL),
Choice(ParameterEntity("reltable"), ZERO_OR_MORE)
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
Attribute("anchorref", "CDATA", "#IMPLIED"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
ParameterEntity("localization-atts"),
ParameterEntity("topicref-atts"),
ParameterEntity("select-atts")
]
# Topic types
#####################################################################
class TopicType(ditagen.dita.Type):
"""Topic topic type."""
id = u"topic"
file = u"base/dtd/topic" # the .dtd file is at technicalContent
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Topic//EN"
title = u"Topic"
parent = None
root = TopicElement()
class ConceptType(TopicType):
"""Concept topic type."""
id = u"concept"
file = u"technicalContent/dtd/concept"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Concept//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Concept//EN"
title = u"Concept"
parent = TopicType()
root = ConceptElement()
class TaskType(TopicType):
"""Task topic type."""
id = u"task"
file = u"technicalContent/dtd/task"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Task//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Task//EN"
title = u"Task"
parent = TopicType()
root = TaskElement()
def __init__(self):
super(TaskType, self).__init__()
#self.required_domains = [StrictTaskbodyConstraints]
class GeneralTaskType(ditagen.dita.ShellType):
"""General Task topic type."""
def __init__(self):
super(GeneralTaskType, self).__init__(u"generalTask", u"General Task", TaskType())
#self.parent.required_domains = []
class ReferenceType(TopicType):
"""Reference topic type."""
id = u"reference"
file = u"technicalContent/dtd/reference"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Reference//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Reference//EN"
title = u"Reference"
parent = TopicType()
root = ReferenceElement()
class MapType(ditagen.dita.Type):
"""Map topic type."""
id = u"map"
file = u"base/dtd/map" # the .dtd file is at technicalContent
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Map//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Map//EN"
title = u"Map"
parent = None
root = MapElement()
class BookMapType(MapType):
"""BookMap topic type."""
id = u"bookmap"
file = u"bookmap/dtd/bookmap"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 BookMap//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 BookMap//EN"
title = u"BookMap"
parent = MapType()
root = BookMapElement()
class GlossentryType(ConceptType):
"""Glossary entry topic type."""
id = u"glossentry"
file = u"technicalContent/dtd/glossentry"
pi_entity = u"-//OASIS//ENTITIES DITA Glossary Entry//EN"
pi_module = u"-//OASIS//ELEMENTS DITA Glossary Entry//EN"
title = u"Glossary Entry"
parent = ConceptType()
root = GlossentryElement()
class GlossgroupType(ConceptType):
"""Glossary group topic type."""
id = u"glossgroup"
file = u"technicalContent/dtd/glossgroup"
pi_entity = u"-//OASIS//ENTITIES DITA Glossary Group//EN"
pi_module = u"-//OASIS//ELEMENTS DITA Glossary Group//EN"
title = u"Glossary Group"
parent = ConceptType()
root = GlossgroupElement()
class MachineryTaskType(ditagen.dita.ShellType):
"""Machinery Task topic type."""
def __init__(self):
super(MachineryTaskType, self).__init__(u"machineryTask", u"Machinery Task", TaskType(), file=u"machineryIndustry/dtd/machineryTask")
#self.parent.required_domains = [MachineryTaskbodyConstraints]
class LearningBaseType(TopicType):
"""Learning Base topic type."""
id = u"learningBase"
file = u"learning/dtd/learningBase"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Base//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Base//EN"
title = u"Learning Base"
parent = TopicType()
root = LearningBaseElement()
class LearningAssessmentType(LearningBaseType):
"""Learning Assessment topic type."""
id = u"learningAssessment"
file = u"learning/dtd/learningAssessment"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Assessment//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Assessment//EN"
title = u"Learning Assessment"
parent = LearningBaseType()
root = LearningAssessmentElement()
class LearningOverviewType(LearningBaseType):
"""Learning Overview topic type."""
id = u"learningOverview"
file = u"learning/dtd/learningOverview"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Overview//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Overview//EN"
title = u"Learning Overview"
parent = LearningBaseType()
root = LearningOverviewElement()
class LearningPlanType(LearningBaseType):
"""Learning Plan topic type."""
id = u"learningPlan"
file = u"learning/dtd/learningPlan"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Plan//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Plan//EN"
title = u"Learning Plan"
parent = LearningBaseType()
root = LearningPlanElement()
class LearningSummaryType(LearningBaseType):
"""Learning Summary topic type."""
id = u"learningSummary"
file = u"learning/dtd/learningSummary"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Summary//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Summary//EN"
title = u"Learning Summary"
parent = LearningBaseType()
root = LearningSummaryElement()
class LearningContentType(LearningBaseType):
"""Learning | |
(inet:ip-address-no-zone)
YANG Description: When an IP next-hop is specified in the next-hop field,
packets matching the match criteria for the forwarding rule
should be forwarded to the next-hop IP address, bypassing any
lookup on the local system.
"""
return self.__next_hop
def _set_next_hop(self, v, load=False):
"""
Setter method for next_hop, mapped from YANG variable /network_instances/network_instance/policy_forwarding/policies/policy/rules/rule/action/state/next_hop (inet:ip-address-no-zone)
If this variable is read-only (config: false) in the
source YANG file, then _set_next_hop is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_next_hop() directly.
YANG Description: When an IP next-hop is specified in the next-hop field,
packets matching the match criteria for the forwarding rule
should be forwarded to the next-hop IP address, bypassing any
lookup on the local system.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=[
RestrictedClassType(
base_type=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
restriction_dict={"pattern": "[0-9\\.]*"},
),
RestrictedClassType(
base_type=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
},
),
restriction_dict={"pattern": "[0-9a-fA-F:\\.]*"},
),
],
is_leaf=True,
yang_name="next-hop",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ip-address-no-zone",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """next_hop must be of a type compatible with inet:ip-address-no-zone""",
"defined-type": "inet:ip-address-no-zone",
"generated-type": """YANGDynClass(base=[RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9\\.]*'}),RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9a-fA-F:\\.]*'}),], is_leaf=True, yang_name="next-hop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ip-address-no-zone', is_config=False)""",
}
)
self.__next_hop = t
if hasattr(self, "_set"):
self._set()
def _unset_next_hop(self):
self.__next_hop = YANGDynClass(
base=[
RestrictedClassType(
base_type=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
restriction_dict={"pattern": "[0-9\\.]*"},
),
RestrictedClassType(
base_type=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
},
),
restriction_dict={"pattern": "[0-9a-fA-F:\\.]*"},
),
],
is_leaf=True,
yang_name="next-hop",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ip-address-no-zone",
is_config=False,
)
discard = __builtin__.property(_get_discard)
decapsulate_gre = __builtin__.property(_get_decapsulate_gre)
network_instance = __builtin__.property(_get_network_instance)
path_selection_group = __builtin__.property(_get_path_selection_group)
next_hop = __builtin__.property(_get_next_hop)
_pyangbind_elements = OrderedDict(
[
("discard", discard),
("decapsulate_gre", decapsulate_gre),
("network_instance", network_instance),
("path_selection_group", path_selection_group),
("next_hop", next_hop),
]
)
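# Illustrative note: pyangbind exposes each operational-state leaf above as a
# read-only property on the generated class (the setters are private because
# config is false). A consumer would typically read them along the lines of:
#
#   act = rule.action.state          # 'rule' is a hypothetical parent binding
#   if act.discard:                  # YANGBool leaf, defaults to false
#       drop(packet)                 # 'drop'/'packet' are placeholders
#   nh = str(act.next_hop)           # IPv4/IPv6 address string when set
#
# Only the leaf names and their types come from this module; everything else in
# the snippet is an assumption.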
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/policy-forwarding/policies/policy/rules/rule/action/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Operational state parameters relating to the
forwarding rule's action.
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__discard",
"__decapsulate_gre",
"__network_instance",
"__path_selection_group",
"__next_hop",
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__discard = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="discard",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
self.__decapsulate_gre = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="decapsulate-gre",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
self.__network_instance = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="network-instance",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
self.__path_selection_group = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="path-selection-group",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
self.__next_hop = YANGDynClass(
base=[
RestrictedClassType(
base_type=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
restriction_dict={"pattern": "[0-9\\.]*"},
),
RestrictedClassType(
base_type=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
},
),
restriction_dict={"pattern": "[0-9a-fA-F:\\.]*"},
),
],
is_leaf=True,
yang_name="next-hop",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ip-address-no-zone",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"policy-forwarding",
"policies",
"policy",
"rules",
"rule",
"action",
"state",
]
def _get_discard(self):
"""
Getter method for discard, mapped from YANG variable /network_instances/network_instance/policy_forwarding/policies/policy/rules/rule/action/state/discard (boolean)
YANG Description: When this leaf is set to true, the local system should drop
packets that match the rule.
"""
return self.__discard
def _set_discard(self, v, load=False):
"""
Setter method for discard, mapped from YANG variable /network_instances/network_instance/policy_forwarding/policies/policy/rules/rule/action/state/discard (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_discard is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_discard() directly.
YANG Description: When this leaf is set to true, the local system should drop
packets that match the rule.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="discard",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """discard must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="discard", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__discard = t
if hasattr(self, "_set"):
self._set()
def _unset_discard(self):
self.__discard = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="discard",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
def _get_decapsulate_gre(self):
"""
Getter method for decapsulate_gre, mapped from YANG variable /network_instances/network_instance/policy_forwarding/policies/policy/rules/rule/action/state/decapsulate_gre (boolean)
YANG Description: When this leaf is set to true, the local system should remove
the GRE header from the packet matching the rule. Following
the decapsulation it should subsequently forward the
encapsulated packet according to the relevant lookup (e.g., if
the encapsulated packet is IP, the packet should be routed
according to the IP destination).
"""
return self.__decapsulate_gre
def _set_decapsulate_gre(self, v, load=False):
"""
Setter method for decapsulate_gre, mapped from YANG variable /network_instances/network_instance/policy_forwarding/policies/policy/rules/rule/action/state/decapsulate_gre (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_decapsulate_gre is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_decapsulate_gre() directly.
YANG Description: When this leaf is set to true, the local system should remove
the GRE header from the packet matching the rule. Following
the decapsulation it should subsequently forward the
encapsulated packet according to the relevant lookup (e.g., if
the encapsulated packet is IP, the packet should be routed
according to the IP destination).
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="decapsulate-gre",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """decapsulate_gre must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="decapsulate-gre", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__decapsulate_gre = t
if hasattr(self, "_set"):
self._set()
def _unset_decapsulate_gre(self):
self.__decapsulate_gre = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="decapsulate-gre",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
def _get_network_instance(self):
"""
Getter method for network_instance, mapped from YANG variable /network_instances/network_instance/policy_forwarding/policies/policy/rules/rule/action/state/network_instance (leafref)
YANG Description: When this leaf is set, packets matching the match criteria
for the forwarding rule should be looked up in the
network-instance that is referenced rather than the
network-instance with which the interface is associated.
Such configuration allows policy-routing into multiple
sub-topologies from a single ingress access interface, or
different send and receive contexts for a particular
interface (sometimes referred to as half-duplex VRF).
"""
return self.__network_instance
def _set_network_instance(self, v, load=False):
"""
Setter method for network_instance, mapped from YANG variable /network_instances/network_instance/policy_forwarding/policies/policy/rules/rule/action/state/network_instance (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_network_instance is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_network_instance() directly.
YANG Description: When this leaf is set, packets matching the match criteria
for the forwarding rule should be looked up in the
network-instance that is referenced rather than the
network-instance with which the interface is associated.
Such configuration allows policy-routing into multiple
sub-topologies from a single ingress access interface, or
different send and receive contexts for a particular
interface (sometimes referred to as half-duplex VRF).
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="network-instance",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """network_instance must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="network-instance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=False)""",
}
)
self.__network_instance = t
if hasattr(self, "_set"):
self._set()
def _unset_network_instance(self):
self.__network_instance = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="network-instance",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
def _get_path_selection_group(self):
"""
Getter method for path_selection_group, mapped from YANG variable /network_instances/network_instance/policy_forwarding/policies/policy/rules/rule/action/state/path_selection_group (leafref)
YANG Description: When path-selection-group is set, packets matching the
match criteria for the forwarding rule should be forwarded
only via one
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An abstract class for the Trainer for both CAIP and Vertex."""
import abc
import datetime
import json
import random
from typing import Any, Dict, List, Optional, Union
from absl import logging
from google.cloud.aiplatform import gapic
from google.cloud.aiplatform_v1beta1.types.custom_job import CustomJob
from google.cloud.aiplatform_v1beta1.types.job_state import JobState
from googleapiclient import discovery
from tfx import types
from tfx.types import artifact_utils
from tfx.utils import telemetry_utils
from tfx.utils import version_utils
# Default container image being used for CAIP training jobs.
_TFX_IMAGE = 'gcr.io/tfx-oss-public/tfx:{}'.format(
version_utils.get_image_version())
# Entrypoint of cloud AI platform training. The module comes from `tfx`
# package installation into a default location of 'python'.
_CONTAINER_COMMAND = ['python', '-m', 'tfx.scripts.run_executor']
_VERTEX_ENDPOINT_SUFFIX = '-aiplatform.googleapis.com'
_VERTEX_JOB_STATE_SUCCEEDED = JobState.JOB_STATE_SUCCEEDED
_VERTEX_JOB_STATE_FAILED = JobState.JOB_STATE_FAILED
_VERTEX_JOB_STATE_CANCELLED = JobState.JOB_STATE_CANCELLED
class AbstractJobClient(abc.ABC):
"""Abstract class interacting with CAIP CMLE job or Vertex CustomJob."""
JOB_STATES_COMPLETED = () # Job states for success, failure or cancellation
JOB_STATES_FAILED = () # Job states for failure or cancellation
def __init__(self):
self.create_client()
self._job_name = '' # Assigned in self.launch_job()
@abc.abstractmethod
def create_client(self) -> None:
"""Creates the job client.
Can also be used for recreating the job client (e.g. in the case of
communication failure).
Multiple job requests can be done in parallel if needed, by creating an
instance of the class for each job. Note that one class instance should
only be used for one job, as each instance stores variables (e.g. job_id)
specific to each job.
"""
pass
@abc.abstractmethod
def create_training_args(self, input_dict, output_dict, exec_properties,
executor_class_path, training_inputs,
job_id) -> Dict[str, Any]:
"""Get training args for runner._launch_aip_training.
The training args contain the inputs/outputs/exec_properties to the
tfx.scripts.run_executor module.
Args:
input_dict: Passthrough input dict for tfx.components.Trainer.executor.
output_dict: Passthrough input dict for tfx.components.Trainer.executor.
exec_properties: Passthrough input dict for
tfx.components.Trainer.executor.
executor_class_path: class path for TFX core default trainer.
training_inputs: Training input argument for AI Platform training job.
job_id: Job ID for AI Platform Training job. If not supplied,
system-determined unique ID is given.
Returns:
A dict containing the training arguments
"""
pass
@abc.abstractmethod
def _create_job_spec(
self,
job_id: str,
training_input: Dict[str, Any],
job_labels: Optional[Dict[str, str]] = None) -> Dict[str, Any]:
"""Creates the job spec.
Args:
job_id: The job ID of the AI Platform training job.
training_input: Training input argument for AI Platform training job.
job_labels: The dict of labels that will be attached to this job.
Returns:
The job specification.
"""
pass
@abc.abstractmethod
def launch_job(self,
job_id: str,
parent: str,
training_input: Dict[str, Any],
job_labels: Optional[Dict[str, str]] = None) -> None:
"""Launches a long-running job.
Args:
job_id: The job ID of the AI Platform training job.
parent: The project name in the form of 'projects/{project_id}'
training_input: Training input argument for AI Platform training job. See
https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#TrainingInput
for the detailed schema.
job_labels: The dict of labels that will be attached to this job.
"""
pass
@abc.abstractmethod
def get_job(self) -> Union[Dict[str, str], CustomJob]:
"""Gets the the long-running job."""
pass
@abc.abstractmethod
def get_job_state(
self, response: Union[Dict[str, str], CustomJob]) -> Union[str, JobState]:
"""Gets the state of the long-running job.
Args:
response: The response from get_job
Returns:
The job state.
"""
pass
def get_job_name(self) -> str:
"""Gets the job name."""
return self._job_name
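# Illustrative sketch of how a caller is expected to drive a job client: build the
# training args, launch the job, then poll until a completed state is reached. In
# TFX the real polling loop lives in the runner module (runner._launch_aip_training),
# which is not part of this file; the poll interval below is an assumed value.
def _example_wait_for_job(client: AbstractJobClient,
                          poll_interval_seconds: int = 30) -> None:
  """Polls a launched job until it completes; raises if it failed (sketch only)."""
  import time
  while True:
    response = client.get_job()
    state = client.get_job_state(response)
    if state in client.JOB_STATES_COMPLETED:
      if state in client.JOB_STATES_FAILED:
        raise RuntimeError('Job {} finished in failed state {}.'.format(
            client.get_job_name(), state))
      return
    time.sleep(poll_interval_seconds)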
class CAIPJobClient(AbstractJobClient):
"""Class for interacting with CAIP CMLE job."""
JOB_STATES_COMPLETED = ('SUCCEEDED', 'FAILED', 'CANCELLED')
JOB_STATES_FAILED = ('FAILED', 'CANCELLED')
def create_client(self) -> None:
"""Creates the discovery job client.
Can also be used for recreating the job client (e.g. in the case of
communication failure).
Multiple job requests can be done in parallel if needed, by creating an
instance of the class for each job. Note that one class instance should
only be used for one job, as each instance stores variables (e.g. job_id)
specific to each job.
"""
self._client = discovery.build(
'ml',
'v1',
requestBuilder=telemetry_utils.TFXHttpRequest,
)
def create_training_args(self, input_dict: Dict[str, List[types.Artifact]],
output_dict: Dict[str, List[types.Artifact]],
exec_properties: Dict[str, Any],
executor_class_path: str,
training_inputs: Dict[str, Any],
job_id: Optional[str]) -> Dict[str, Any]:
"""Get training args for runner._launch_aip_training.
The training args contain the inputs/outputs/exec_properties to the
tfx.scripts.run_executor module.
Args:
input_dict: Passthrough input dict for tfx.components.Trainer.executor.
output_dict: Passthrough input dict for tfx.components.Trainer.executor.
exec_properties: Passthrough input dict for
tfx.components.Trainer.executor.
executor_class_path: class path for TFX core default trainer.
training_inputs: Training input argument for AI Platform training job.
'pythonModule', 'pythonVersion' and 'runtimeVersion' will be inferred.
For the full set of parameters, refer to
https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#TrainingInput
job_id: Job ID for AI Platform Training job. If not supplied,
system-determined unique ID is given. Refer to
https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#resource-job
Returns:
A dict containing the training arguments
"""
training_inputs = training_inputs.copy()
json_inputs = artifact_utils.jsonify_artifact_dict(input_dict)
logging.info('json_inputs=\'%s\'.', json_inputs)
json_outputs = artifact_utils.jsonify_artifact_dict(output_dict)
logging.info('json_outputs=\'%s\'.', json_outputs)
json_exec_properties = json.dumps(exec_properties, sort_keys=True)
logging.info('json_exec_properties=\'%s\'.', json_exec_properties)
# We use custom containers to launch training on AI Platform, which invokes
# the specified image using the container's entrypoint. The default
# entrypoint for TFX containers is to call scripts/run_executor.py. The
# arguments below are passed to this run_executor entry to run the executor
# specified in `executor_class_path`.
container_command = _CONTAINER_COMMAND + [
'--executor_class_path',
executor_class_path,
'--inputs',
json_inputs,
'--outputs',
json_outputs,
'--exec-properties',
json_exec_properties,
]
if not training_inputs.get('masterConfig'):
training_inputs['masterConfig'] = {
'imageUri': _TFX_IMAGE,
}
# Always use our own entrypoint instead of relying on container default.
if 'containerCommand' in training_inputs['masterConfig']:
logging.warn('Overriding custom value of containerCommand')
training_inputs['masterConfig']['containerCommand'] = container_command
# Pop project_id so AIP doesn't complain about an unexpected parameter.
# It's been a stowaway in aip_args and has finally reached its destination.
project = training_inputs.pop('project')
with telemetry_utils.scoped_labels(
{telemetry_utils.LABEL_TFX_EXECUTOR: executor_class_path}):
job_labels = telemetry_utils.make_labels_dict()
# 'tfx_YYYYmmddHHMMSS' is the default job ID if not explicitly specified.
job_id = job_id or 'tfx_{}'.format(
datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
training_args = {
'job_id': job_id,
'project': project,
'training_input': training_inputs,
'job_labels': job_labels
}
return training_args
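  # Illustrative note: for a training_inputs dict such as
  #   {'project': 'my-gcp-project', 'scaleTier': 'BASIC'}
  # and no explicit job_id, the dict returned above looks roughly like:
  #   {
  #     'job_id': 'tfx_20210101000000',   # timestamp-based default
  #     'project': 'my-gcp-project',
  #     'training_input': {'scaleTier': 'BASIC',
  #                        'masterConfig': {'imageUri': _TFX_IMAGE,
  #                                         'containerCommand': [...]}},
  #     'job_labels': {...},              # from telemetry_utils.make_labels_dict()
  #   }
  # 'my-gcp-project' and 'BASIC' are made-up example values; note that 'project'
  # is popped out of training_input and returned as its own key.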
def _create_job_spec(
self,
job_id: str,
training_input: Dict[str, Any],
job_labels: Optional[Dict[str, str]] = None) -> Dict[str, Any]:
"""Creates the job spec.
Args:
job_id: The job ID of the AI Platform training job.
training_input: Training input argument for AI Platform training job. See
https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#TrainingInput
for the detailed schema.
job_labels: The dict of labels that will be attached to this job.
Returns:
The job specification. See
https://cloud.google.com/ai-platform/training/docs/reference/rest/v1/projects.jobs
"""
job_spec = {
'jobId': job_id,
'trainingInput': training_input,
'labels': job_labels,
}
return job_spec
def launch_job(self,
job_id: str,
project: str,
training_input: Dict[str, Any],
job_labels: Optional[Dict[str, str]] = None) -> None:
"""Launches a long-running job.
Args:
job_id: The job ID of the AI Platform training job.
project: The GCP project under which the training job will be executed.
training_input: Training input argument for AI Platform training job. See
https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#TrainingInput
for the detailed schema.
job_labels: The dict of labels that will be attached to this job.
"""
parent = 'projects/{}'.format(project)
job_spec = self._create_job_spec(job_id, training_input, job_labels)
# Submit job to AIP Training
logging.info('TrainingInput=%s', training_input)
logging.info('Submitting job=\'%s\', project=\'%s\' to AI Platform.',
job_id, parent)
request = self._client.projects().jobs().create(
body=job_spec, parent=parent)
self._job_name = '{}/jobs/{}'.format(parent, job_id)
request.execute()
def get_job(self) -> Dict[str, str]:
"""Gets the long-running job."""
request = self._client.projects().jobs().get(name=self._job_name)
return request.execute()
def get_job_state(self, response) -> str:
"""Gets the state of the long-running job.
Args:
response: The response from get_job
Returns:
The job state.
"""
return response['state']
class VertexJobClient(AbstractJobClient):
"""Class for interacting with Vertex CustomJob."""
JOB_STATES_COMPLETED = (_VERTEX_JOB_STATE_SUCCEEDED, _VERTEX_JOB_STATE_FAILED,
_VERTEX_JOB_STATE_CANCELLED)
JOB_STATES_FAILED = (_VERTEX_JOB_STATE_FAILED, _VERTEX_JOB_STATE_CANCELLED)
def __init__(self, vertex_region: str):
if vertex_region is None:
raise ValueError('Please specify a region for Vertex training.')
self._region = vertex_region
super().__init__()
def create_client(self) -> None:
"""Creates the Gapic job client.
Can also be used for recreating the job client (e.g. in the case of
communication failure).
Multiple job requests can be done in parallel if needed, by creating an
instance of the class for each job. Note that one class instance should
only be used for one job, as each instance stores variables (e.g. job_id)
specific to each job.
"""
self._client = gapic.JobServiceClient(
client_options=dict(
            api_endpoint=self._region + _VERTEX_ENDPOINT_SUFFIX))
# benchmarking/driver/benchmark_driver.py
#!/usr/bin/env python
##############################################################################
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import gc
import os
import sys
import time
import traceback
from utils.custom_logger import getLogger
from utils.utilities import getCommand, deepMerge, setRunStatus, getRunStatus
def runOneBenchmark(
info,
benchmark,
framework,
platform,
backend,
reporters,
lock,
cooldown=None,
user_identifier=None,
local_reporter=None,
):
assert "treatment" in info, "Treatment is missing in info"
getLogger().info("Running {}".format(benchmark["path"]))
status = 0
minfo = copy.deepcopy(info["treatment"])
mbenchmark = copy.deepcopy(benchmark)
if "shared_libs" in info:
minfo["shared_libs"] = info["shared_libs"]
try:
# invalidate CPU cache
[1.0 for _ in range(20 << 20)]
gc.collect()
data = _runOnePass(minfo, mbenchmark, framework, platform)
status = status | getRunStatus()
meta = None
if "control" in info:
cinfo = copy.deepcopy(info["control"])
if "shared_libs" in info:
cinfo["shared_libs"] = info["shared_libs"]
# cool down between treatment and control
if "model" in benchmark and "cooldown" in benchmark["model"]:
cooldown = float(benchmark["model"]["cooldown"])
time.sleep(cooldown)
# invalidate CPU cache
[1.0 for _ in range(20 << 20)]
gc.collect()
control = _runOnePass(cinfo, benchmark, framework, platform)
status = status | getRunStatus()
bname = benchmark["model"]["name"]
data = _mergeDelayData(data, control, bname)
if benchmark["tests"][0]["metric"] != "generic":
data = _adjustData(info, data)
meta = _retrieveMeta(
info, benchmark, platform, framework, backend, user_identifier
)
data = _retrieveInfo(info, data)
result = {"meta": meta, "data": data}
except Exception as e:
# Catch all exceptions so that failure in one test does not
# affect other tests
getLogger().info("Exception caught when running benchmark")
getLogger().info(e)
data = None
status = 2
setRunStatus(status)
getLogger().error(traceback.format_exc())
# Set result meta and data to default values to that
# the reporter will not try to key into a None
result = {"meta": {}, "data": []}
if data is None or len(data) == 0:
_logNoData(benchmark, info, platform.getMangledName())
return status
with lock:
for reporter in reporters:
reporter.report(result)
if (
"regression_commits" in info
and info["run_type"] == "benchmark"
and local_reporter
):
from regression_detectors.regression_detectors import checkRegressions
checkRegressions(
info,
platform,
framework,
benchmark,
reporters,
result["meta"],
local_reporter,
)
return status
def _logNoData(benchmark, info, name):
model_name = ""
if "model" in benchmark and "name" in benchmark["model"]:
model_name = benchmark["model"]["name"]
commit_hash = ""
if "commit" in info["treatment"]:
commit_hash = info["treatment"]["commit"]
getLogger().info(
"No data collected for {}".format(model_name)
+ "on {}. ".format(name)
+ "The run may be failed for "
+ "{}".format(commit_hash)
)
def _runOnePass(info, benchmark, framework, platform):
assert (
len(benchmark["tests"]) == 1
), "At this moment, only one test exists in the benchmark"
to = benchmark["model"]["repeat"] if "repeat" in benchmark["model"] else 1
output = None
for idx in range(to):
benchmark["tests"][0]["INDEX"] = idx
one_output, output_files = framework.runBenchmark(info, benchmark, platform)
if output:
deepMerge(output, one_output)
else:
output = copy.deepcopy(one_output)
if getRunStatus() != 0:
# early exit if there is an error
break
stats = _getStatisticsSet(benchmark["tests"][0])
data = _processDelayData(output, stats)
return data
def _processDelayData(input_data, stats):
if not isinstance(input_data, dict):
return input_data
data = {}
for k in input_data:
d = input_data[k]
if d is not None:
data[k] = copy.deepcopy(d)
if "values" in d:
if "summary" not in d:
data[k]["summary"] = _getStatistics(d["values"], stats)
if "num_runs" not in d:
data[k]["num_runs"] = len(data[k]["values"])
return data
def _mergeDelayData(treatment_data, control_data, bname):
data = copy.deepcopy(treatment_data)
    # meta is not a metric, so handle it separately
data["meta"] = _mergeDelayMeta(treatment_data["meta"], control_data["meta"], bname)
for k in treatment_data:
# meta was already merged, so don't try to merge it again
if k == "meta":
continue
if k not in control_data:
getLogger().error(
"Value {} existed in treatment but not ".format(k)
+ "control for benchmark {}".format(bname)
)
continue
control_value = control_data[k]
treatment_value = treatment_data[k]
if "info_string" in treatment_value:
assert (
"info_string" in control_value
), "Control value missing info_string field"
# If the treatment and control are not the same,
# treatment value is used, the control value is lost.
treatment_string = treatment_value["info_string"]
control_string = control_value["info_string"]
if treatment_string != control_string:
getLogger().warning(
"Treatment value is used, and the control value is lost. "
+ "The field info_string in control "
+ "({})".format(control_string)
+ "is different from the info_string in treatment "
+ "({})".format(treatment_string)
)
if "values" in control_value:
data[k]["control_values"] = control_value["values"]
if "summary" in control_value:
data[k]["control_summary"] = control_value["summary"]
assert "summary" in treatment_value, "Summary is missing in treatment"
# create diff of delay
if "summary" in control_value and "summary" in treatment_value:
data[k]["diff_summary"] = _createDiffOfDelay(
control_value["summary"], treatment_value["summary"]
)
return data
def _to_float(token: str):
try:
return float(token)
except ValueError:
return None
def _percentileArgVal(token) -> float:
if len(token) < 2 or token[0] != "p":
return None
percentile = _to_float(token[1:])
return (
percentile
if percentile is not None and percentile >= 0 and percentile <= 100
else None
)
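# Worked examples for _percentileArgVal:
#   _percentileArgVal("p50")   -> 50.0
#   _percentileArgVal("p99.9") -> 99.9
#   _percentileArgVal("p200")  -> None   (outside the 0..100 range)
#   _percentileArgVal("mean")  -> None   (does not start with "p")
#   _percentileArgVal("p")     -> None   (no numeric part)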
def _createDiffOfDelay(csummary, tsummary):
# create diff of delay
diff_summary = {}
for key in tsummary:
if tsummary[key] is None:
continue
arg = _percentileArgVal(key)
if arg is not None:
if arg == int(arg):
reflection = "p" + str(100 - int(arg))
else:
reflection = "p" + str(100.0 - arg)
if reflection in csummary and csummary[reflection] is not None:
diff_summary[key] = round(tsummary[key] - csummary[reflection], 15)
elif key in csummary and csummary[key] is not None:
diff_summary[key] = round(tsummary[key] - csummary[key], 15)
return diff_summary
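# Worked example for _createDiffOfDelay: percentile keys are compared against the
# reflected percentile of the control run (treatment p90 vs control p10, etc.),
# while non-percentile keys such as "mean" are compared directly. For
#   tsummary = {"p90": 10.0, "mean": 5.0}
#   csummary = {"p10": 4.0, "p90": 9.0, "mean": 4.5}
# the result is {"p90": 6.0, "mean": 0.5}. The numbers are made up for illustration.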
def _mergeDelayMeta(treatment_meta, control_meta, bname):
meta = copy.deepcopy(treatment_meta)
for k in treatment_meta:
if k not in control_meta:
getLogger().error(
"Value {} existed in treatment but not ".format(k)
+ "control for benchmark {}".format(bname)
)
continue
meta["control_{}".format(k)] = control_meta[k]
return meta
def _processErrorData(treatment_files, golden_files, stats=None):
treatment_outputs = _collectErrorData(treatment_files)
golden_outputs = _collectErrorData(golden_files)
data = {}
for output in treatment_outputs:
treatment_values = treatment_outputs[output]
assert output in golden_outputs, "Output {} is missing in golden".format(output)
golden_values = golden_outputs[output]
diff_values = list(
map(lambda pair: pair[0] - pair[1], zip(treatment_values, golden_values))
)
diff_values.sort()
treatment_values.sort()
golden_values.sort()
data[output] = {
"summary": _getStatistics(treatment_values, stats),
"control_summary": _getStatistics(golden_values, stats),
"diff_summary": _getStatistics(diff_values, stats),
}
data[output]["type"] = output
data[output]["num_runs"] = len(treatment_values)
return data
def _collectErrorData(output_files):
data = {}
for output in output_files:
filename = output_files[output]
assert os.path.isfile(filename), "File {} doesn't exist".format(filename)
with open(filename, "r") as f:
content = f.read().splitlines()
data[output] = [float(x.strip()) for x in content]
return data
_default_statistics = ["mean", "p0", "p10", "p50", "p90", "p100", "stdev", "MAD", "cv"]
def _getStatisticsSet(test):
if test is not None and "statistics" in test:
result = test["statistics"]
if "p50" not in result:
result.append(
"p50"
) # always include p50 since it is needed for internal calculations
return result
else:
return _default_statistics
def _getStatistics(array, stats=_default_statistics):
if len(array) == 0:
return {}
if "p50" not in stats:
stats.append(
"p50"
) # always include p50 since it is needed for internal calculations
sorted_array = sorted(array)
median = _getMedian(sorted_array)
mean = _getMean(array)
stdev = _getStdev(array, mean)
meta_values = {
"mean": mean,
"p50": median, # special case for even-numbered arrays
"stdev": stdev,
"MAD": _getMedian(sorted(map(lambda x: abs(x - median), sorted_array))),
"cv": stdev / mean if mean != 0 else None,
}
results = {}
for stat in stats:
if stat in meta_values:
results[stat] = meta_values[stat]
else:
percentile_arg_value = _percentileArgVal(stat) # parses p0-p100
if percentile_arg_value is None:
getLogger().error(f"Unsupported custom statistic '{stat}' ignored.")
assert (
percentile_arg_value is not None
), f"Unsupported custom statistic '{stat}'."
else:
results[stat] = _getPercentile(sorted_array, percentile_arg_value)
return results
def _getPercentile(sorted_array, percentile: float):
length = len(sorted_array)
assert (
length > 0 and percentile >= 0 and percentile <= 100
), f"invalid percentile value '{percentile}'."
if percentile == 100:
return sorted_array[-1]
if percentile == 50:
return _getMedian(sorted_array)
# linear interpolation: exactly matches np.percentile(sorted_array, percentile, interpolation="linear")
k = (length - 1) * percentile / 100.0
floor_index = int(k)
ceil_index = int(k + 1.0) # valid only if k is not already an integer value
if (
floor_index == k or ceil_index >= length
): # handle the case where k is integer or max
return sorted_array[floor_index]
weighted_floor_value = sorted_array[floor_index] * (ceil_index - k)
weighted_ceil_value = sorted_array[ceil_index] * (k - floor_index)
return weighted_floor_value + weighted_ceil_value
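# Worked example (added comment, not original code): _getPercentile([1, 2, 3, 4], 75)
#   k = (4 - 1) * 75 / 100 = 2.25, floor_index = 2, ceil_index = 3
#   result = 3 * (3 - 2.25) + 4 * (2.25 - 2) = 2.25 + 1.0 = 3.25
# which matches np.percentile([1, 2, 3, 4], 75) with linear interpolation.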
def _getMean(values):
return sum(values) / len(values)
def _getStdev(values, mean):
sq_diffs = [(x - mean) ** 2 for x in values]
return (sum(sq_diffs) / len(values)) ** 0.5
def _getMedian(values):
length = len(values)
return (
values[length // 2]
if (length % 2) == 1
else (values[(length - 1) // 2] + values[length // 2]) / 2
)
def _adjustData(info, data):
if
a collection of things.
Especially useful for representing a collection of related variables."""
def __init__(self, **keywords):
self.__dict__.update(keywords)
def __repr__(self):
return self.toString()
def ivars(self):
return sorted(self.__dict__)
def keys(self):
return sorted(self.__dict__)
def toString(self):
tag = self.__dict__.get('tag')
entries = ["%s: %s" % (key, str(self.__dict__.get(key)) or repr(self.__dict__.get(key)))
for key in self.ivars() if key != 'tag']
result = ['g.Bunch(%s)' % (tag or '')]
result.extend(entries)
return '\n '.join(result) + '\n'
# Used by new undo code.
def __setitem__(self, key, value):
'''Support aBunch[key] = val'''
return operator.setitem(self.__dict__, key, value)
def __getitem__(self, key):
'''Support aBunch[key]'''
# g.pr('g.Bunch.__getitem__', key)
return operator.getitem(self.__dict__, key)
def get(self, key, theDefault=None):
return self.__dict__.get(key, theDefault)
def __contains__(self, key): # New.
# g.pr('g.Bunch.__contains__', key in self.__dict__, key)
return key in self.__dict__
bunch = Bunch
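# Example usage (illustrative, not part of the original file):
#   b = g.Bunch(tag='demo', x=1)
#   b['y'] = 2                      # __setitem__
#   assert b.get('x') == 1 and 'y' in b and b['y'] == 2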
#@+node:ekr.20120219154958.10492: *3* class g.EmergencyDialog
class EmergencyDialog:
"""A class that creates an tkinter dialog with a single OK button."""
#@+others
#@+node:ekr.20120219154958.10493: *4* emergencyDialog.__init__
def __init__(self, title, message):
"""Constructor for the leoTkinterDialog class."""
self.answer = None # Value returned from run()
self.title = title
self.message = message
self.buttonsFrame = None # Frame to hold typical dialog buttons.
self.defaultButtonCommand = None
# Command to call when user closes the window
# by clicking the close box.
self.frame = None # The outermost frame.
self.root = None # Created in createTopFrame.
self.top = None # The toplevel Tk widget.
self.createTopFrame()
buttons = [{
"text": "OK",
"command": self.okButton,
"default": True,
}]
self.createButtons(buttons)
self.top.bind("<Key>", self.onKey)
#@+node:ekr.20120219154958.10494: *4* emergencyDialog.createButtons
def createButtons(self, buttons):
"""Create a row of buttons.
buttons is a list of dictionaries containing
the properties of each button.
"""
import tkinter as Tk
assert(self.frame)
self.buttonsFrame = f = Tk.Frame(self.top)
f.pack(side="top", padx=30)
# Buttons is a list of dictionaries, with an empty dictionary
# at the end if there is only one entry.
buttonList = []
for d in buttons:
text = d.get("text", "<missing button name>")
isDefault = d.get("default", False)
underline = d.get("underline", 0)
command = d.get("command", None)
bd = 4 if isDefault else 2
b = Tk.Button(f, width=6, text=text, bd=bd,
underline=underline, command=command)
b.pack(side="left", padx=5, pady=10)
buttonList.append(b)
if isDefault and command:
self.defaultButtonCommand = command
return buttonList
#@+node:ekr.20120219154958.10495: *4* emergencyDialog.createTopFrame
def createTopFrame(self):
"""Create the Tk.Toplevel widget for a leoTkinterDialog."""
import tkinter as Tk
self.root = Tk.Tk()
self.top = Tk.Toplevel(self.root)
self.top.title(self.title)
self.root.withdraw()
self.frame = Tk.Frame(self.top)
self.frame.pack(side="top", expand=1, fill="both")
label = Tk.Label(self.frame, text=self.message, bg='white')
label.pack(pady=10)
#@+node:ekr.20120219154958.10496: *4* emergencyDialog.okButton
def okButton(self):
"""Do default click action in ok button."""
self.top.destroy()
self.top = None
#@+node:ekr.20120219154958.10497: *4* emergencyDialog.onKey
def onKey(self, event):
"""Handle Key events in askOk dialogs."""
self.okButton()
#@+node:ekr.20120219154958.10498: *4* emergencyDialog.run
def run(self):
"""Run the modal emergency dialog."""
self.top.geometry("%dx%d%+d%+d" % (300, 200, 50, 50))
self.top.lift()
self.top.grab_set() # Make the dialog a modal dialog.
self.root.wait_window(self.top)
#@-others
#@+node:ekr.20040331083824.1: *3* class g.FileLikeObject
# Note: we could use StringIO for this.
class FileLikeObject:
"""Define a file-like object for redirecting writes to a string.
The caller is responsible for handling newlines correctly."""
#@+others
#@+node:ekr.20050404151753: *4* ctor (g.FileLikeObject)
def __init__(self, encoding='utf-8', fromString=None):
# New in 4.2.1: allow the file to be inited from string s.
self.encoding = encoding or 'utf-8'
if fromString:
self.list = g.splitLines(fromString) # Must preserve newlines!
else:
self.list = []
self.ptr = 0
# In CStringIO the buffer is read-only if the initial value (fromString) is non-empty.
#@+node:ekr.20050404151753.1: *4* clear (g.FileLikeObject)
def clear(self):
self.list = []
#@+node:ekr.20050404151753.2: *4* close (g.FileLikeObject)
def close(self):
pass
# The StringIO version frees the memory buffer.
#@+node:ekr.20050404151753.3: *4* flush (g.FileLikeObject)
def flush(self):
pass
#@+node:ekr.20050404151753.4: *4* get & getvalue & read (g.FileLikeObject)
def get(self):
return ''.join(self.list)
getvalue = get # for compatibility with StringIO
read = get # for use by sax.
#@+node:ekr.20050404151753.5: *4* readline (g.FileLikeObject)
def readline(self):
'''Read the next line using at.list and at.ptr.'''
if self.ptr < len(self.list):
line = self.list[self.ptr]
self.ptr += 1
return line
return ''
#@+node:ekr.20050404151753.6: *4* write (g.FileLikeObject)
def write(self, s):
if s:
if g.isBytes(s):
s = g.toUnicode(s, self.encoding)
self.list.append(s)
#@-others
fileLikeObject = FileLikeObject
# For compatibility.
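# Example usage (illustrative, not part of the original file):
#   f = g.FileLikeObject()
#   f.write('hello\n')
#   f.write('world\n')
#   assert f.getvalue() == 'hello\nworld\n' and f.readline() == 'hello\n'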
#@+node:ekr.20120123143207.10223: *3* class g.GeneralSetting & isGeneralSetting
# Important: The startup code uses this class,
# so it is convenient to define it in leoGlobals.py.
class GeneralSetting:
'''A class representing any kind of setting except shortcuts.'''
def __init__(self, kind,
encoding=None,
ivar=None,
setting=None,
val=None,
path=None,
tag='setting',
unl=None,
):
self.encoding = encoding
self.ivar = ivar
self.kind = kind
self.path = path
self.unl = unl
self.setting = setting
self.val = val
self.tag = tag
def __repr__(self):
result = ['GeneralSetting kind: %s' % (self.kind)]
ivars = ('ivar', 'path', 'setting', 'val', 'tag')
for ivar in ivars:
if hasattr(self, ivar):
val = getattr(self, ivar)
if val is not None:
result.append('%s: %s' % (ivar, val))
return ','.join(result)
dump = __repr__
def isGeneralSetting(obj):
return isinstance(obj, GeneralSetting)
#@+node:ekr.20120201164453.10090: *3* class g.KeyStroke & isStroke/OrNone
class KeyStroke:
'''
A class that represents any key stroke or binding.
stroke.s is the "canonicalized" stroke.
'''
#@+others
#@+node:ekr.20180414195401.2: *4* ks.__init__
def __init__(self, binding):
if binding:
self.s = self.finalize_binding(binding)
else:
self.s = None
#@+node:ekr.20120203053243.10117: *4* ks.__eq__, etc
#@+at All these must be defined in order to say, for example:
# for key in sorted(d)
# where the keys of d are KeyStroke objects.
#@@c
def __eq__(self, other):
if not other:
return False
if hasattr(other, 's'):
return self.s == other.s
return self.s == other
def __lt__(self, other):
if not other:
return False
if hasattr(other, 's'):
return self.s < other.s
return self.s < other
def __le__(self, other): return self.__lt__(other) or self.__eq__(other)
def __ne__(self, other): return not self.__eq__(other)
def __gt__(self, other): return not self.__lt__(other) and not self.__eq__(other)
def __ge__(self, other): return not self.__lt__(other)
#@+node:ekr.20120203053243.10118: *4* ks.__hash__
# Allow KeyStroke objects to be keys in dictionaries.
def __hash__(self):
return self.s.__hash__() if self.s else 0
#@+node:ekr.20120204061120.10067: *4* ks.__repr___ & __str__
def __str__(self):
return '<KeyStroke: %s>' % (repr(self.s))
__repr__ = __str__
#@+node:ekr.20180417160703.1: *4* ks.dump
def dump(self):
'''Show results of printable chars.'''
for i in range(128):
s = chr(i)
stroke = g.KeyStroke(s)
if stroke.s != s:
print('%2s %10r %r' % (i, s, stroke.s))
for ch in ('backspace', 'linefeed', 'return', 'tab'):
stroke = g.KeyStroke(ch)
print('%2s %10r %r' % ('', ch, stroke.s))
#@+node:ekr.20180415082249.1: *4* ks.finalize_binding
def finalize_binding(self, binding):
trace = False and 'keys' in g.app.debug
# This trace is good for devs only.
self.mods = self.find_mods(binding)
s = self.strip_mods(binding)
s = self.finalize_char(s)
# May change self.mods.
mods = ''.join(['%s+' % z.capitalize() for z in self.mods])
if trace and 'meta' in self.mods:
g.trace('%20s:%-20s ==> %s' % (binding, self.mods, mods+s))
return mods+s
#@+node:ekr.20180415083926.1: *4* ks.finalize_char & helper
def finalize_char(self, s):
'''Perform very-last-minute translations on bindings.'''
#
# Retain "bigger" spelling for gang-of-four bindings with modifiers.
shift_d = {
'bksp': 'BackSpace',
'backspace': 'BackSpace',
'backtab': 'Tab', # The shift mod will convert to 'Shift+Tab',
'linefeed': 'Return',
'\r': 'Return',
'return': 'Return',
'tab': 'Tab',
}
if self.mods and s.lower() in shift_d:
return shift_d.get(s.lower())
# Returning '' breaks existing code.
#
# Make all other translations...
#
# This dict ensures proper capitalization.
# It also translates legacy Tk binding names to ascii chars.
translate_d = {
#
# The gang of four...
'bksp': 'BackSpace',
'backspace': 'BackSpace',
'backtab': 'Tab', # The shift mod will convert to 'Shift+Tab',
'linefeed': '\n',
'\r': '\n',
'return': '\n',
'tab': 'Tab',
#
# Special chars...
'delete': 'Delete',
'down': 'Down',
'end': 'End',
'enter': 'Enter',
'escape': 'Escape',
'home': 'Home',
'insert': 'Insert',
'left':'Left',
'next': 'Next',
'prior': 'Prior',
'right': 'Right',
'up': 'Up',
#
# Qt key names...
'del': 'Delete',
'dnarrow': 'Down',
'esc': 'Escape',
'ins': 'Insert',
'ltarrow': 'Left',
'pagedn': 'Next',
'pageup': 'Prior',
'pgdown': 'Next',
'pgup': 'Prior',
'rtarrow': 'Right',
'uparrow': 'Up',
#
# Legacy Tk binding names...
"ampersand": "&",
"asciicircum": "^",
"asciitilde": "~",
"asterisk": "*",
"at": "@",
"backslash": "\\",
"bar": "|",
"braceleft": "{",
"braceright": "}",
"bracketleft": "[",
"bracketright": "]",
"colon": ":",
"comma": ",",
"dollar": "$",
"equal": "=",
"exclam": "!",
"greater": ">",
"less": "<",
"minus": "-",
"numbersign": "#",
"quotedbl": '"',
"quoteright": "'",
"parenleft": "(",
"parenright": ")",
"percent": "%",
"period": ".",
"plus": "+",
"question": "?",
"quoteleft": "`",
"semicolon": ";",
"slash": "/",
"space": " ",
"underscore": "_",
}
#
# pylint: disable=undefined-loop-variable
# Looks like a pylint bug.
if s in (None, 'none', 'None'):
return 'None'
if s.lower() in translate_d:
s
import argparse
import datasets as nlp
import sys
import os
import openai
import csv
import random
import time
from tqdm import tqdm
import ast
from datetime import datetime
import git
import copy
import itertools
import jsonlines
"""
Usage: python get_cose_gpt3_completion_accuracy.py --task_type {labelOnly,explanationOnly,jointBoth} --model {ada,davinci} --num_samples N --prime_source {ours,dataset} --split {validation,test,train} --dataset {cose_v1.11,ecqa} [--total_train_instances N] [--get_shuffled_examples] [--testing]
Example: python get_cose_gpt3_completion_accuracy.py --task_type jointBoth --model davinci --num_samples 1 --prime_source ours --split test --dataset cose_v1.11 --get_shuffled_examples
"""
openai.api_key = os.getenv("OPENAI_API_KEY")
def complete_gpt3(prompt, args):
"""
This function extends 1 or more GPT-3 prompts
Inputs:
prompt: str or [str] is a prompt or list of prompts
generation_length: maximum length of output in tokens
model: GPT-3 version (davinci, curie, babbage, or ada)
num_log_probs: number k of top_k log_probabilities to include for each token gen
top_p: for nucleus sampling in generation
stop: stop token
echo: whether or not to include the input in the GPT-3 response
Output:
response: the raw response from GPT-3, with simple json format
note that the 'choices' field is where generations are in the format
[c_00, c01, ... cnm] where c_ij is the jth generation (based on input n)
of the ith prompt
Function modified from <NAME>
"""
# call GPT-3 API until result is provided and then return it
response = None
received = False
if not args.testing:
while not received:
try:
response = openai.Completion.create(
engine=args.model,
prompt=prompt,
temperature=args.temperature,
max_tokens=args.generation_length,
top_p=args.top_p,
frequency_penalty=args.frequency_penalty,
presence_penalty=args.presence_penalty,
stop=args.stop_token,
n=args.num_samples - 1, # remove 1 for greedy
echo=args.echo,
logprobs=args.logprobs,
)
received = True
except:
error = sys.exc_info()[0]
if (
error == openai.error.InvalidRequestError
): # something is wrong: e.g. prompt too long
print(f"InvalidRequestError\nPrompt passed in:\n\n{prompt}\n\n")
assert False
print("API error:", error)
time.sleep(0.2)
else:
response = {"choices": [{"text": "blah blah blah"}]}
return response
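# Illustrative call (assumes OPENAI_API_KEY is set and `args` is the argparse namespace
# built in __main__ below; added comment, not part of the original script):
#   response = complete_gpt3("question: ...\nchoice1, choice2, or choice3? answer\nwhy?", args)
#   texts = [choice["text"] for choice in response["choices"]]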
def complete_greedy(prompt, args):
# call GPT-3 API until result is provided and then return it
response = None
received = False
if not args.testing:
while not received:
try:
response = openai.Completion.create(
engine=args.model,
prompt=prompt,
temperature=0,
max_tokens=args.generation_length,
top_p=args.top_p,
frequency_penalty=args.frequency_penalty,
presence_penalty=args.presence_penalty,
stop=args.stop_token,
n=1,
echo=args.echo,
logprobs=args.logprobs,
)
received = True
except:
error = sys.exc_info()[0]
if (
error == openai.error.InvalidRequestError
): # something is wrong: e.g. prompt too long
print(f"InvalidRequestError\nPrompt passed in:\n\n{prompt}\n\n")
assert False
print("API error:", error)
time.sleep(0.2)
else:
response = {"choices": [{"text": "blah blah blah"}]}
return response
def get_completion_nll(choice):
"""
Given a ``choice'' field from a GPT-3 response, get
the negative log-likelihood of the full generation (including stop token)
Assumes this was called with echo=False (gets nll of full choice)
Function from <NAME>
"""
nll = None
try:
j = choice["logprobs"]["text_offset"].index(
max(choice["logprobs"]["text_offset"])
)
except:
try:
# if stop-token is not in list, get nll of full list
j = len(choice["logprobs"]["text_offset"])
except:
nll = "not computable"
if nll != "not computable":
# sum of log probs over the target tokens
nll = -sum(choice["logprobs"]["token_logprobs"][:j]) / len(
choice["logprobs"]["token_logprobs"][:j]
)
return nll
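# Worked example (added comment, not original code): for a choice where
#   choice["logprobs"]["text_offset"]    == [0, 4, 9]
#   choice["logprobs"]["token_logprobs"] == [-0.1, -0.2, -0.3]
# j == 2 (index of the largest offset), so nll == -(-0.1 + -0.2) / 2 == 0.15,
# i.e. the mean negative log-probability over the first j tokens.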
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--task_type",
type=str,
choices=[
"labelOnly",
"explanationOnly",
"jointBoth",
],
required=True,
)
parser.add_argument("--model", type=str, choices=["ada", "davinci"], required=True)
parser.add_argument("--num_samples", type=int, required=False, default=1)
parser.add_argument(
"--prime_source", type=str, choices=["ours", "dataset"], required=True
)
parser.add_argument(
"--split", type=str, choices=["validation", "test", "train"], required=True
)
parser.add_argument(
"--dataset", type=str, choices=["cose_v1.11", "ecqa"], required=True
)
parser.add_argument(
"--total_train_instances",
type=int,
required=False,
help="# of instances from training dataset to sample for (only used if dataset split == train",
)
parser.add_argument("--stop_token", type=str, required=False, default="###")
parser.add_argument("--generation_length", type=int, required=False, default=50)
parser.add_argument(
"--temperature", type=int, required=False, default=0
) # greedy sampling
parser.add_argument("--top_p", type=int, required=False, default=1)
parser.add_argument("--random_seed", type=int, required=False, default=10)
parser.add_argument("--frequency_penalty", type=int, required=False, default=0)
parser.add_argument("--presence_penalty", type=int, required=False, default=0)
parser.add_argument("--logprobs", type=int, required=False, default=0)
parser.add_argument("--echo", action="store_const", required=False, const=True)
parser.add_argument(
"--get_shuffled_examples", action="store_const", required=False, const=True
)
parser.add_argument("--testing", action="store_const", required=False, const=True)
args, _ = parser.parse_known_args()
args.command = " ".join(["python"] + sys.argv)
# check args
if args.get_shuffled_examples and args.task_type == "labelOnly":
raise Exception(
"can only produce explanations for judging if explanations part of prediction task"
)
# defines draw of *other* seeds
random.seed(args.random_seed)
# draw args.total_train_instances seeds
if args.split == "train":
# must specify # of samples
assert args.total_train_instances is not None
prompt_seeds = random.sample(
[i for i in range(10000)], args.total_train_instances
)
elif args.split == "test":
# use pre-specified number
args.total_train_instances = 250
prompt_seeds = random.sample(
[i for i in range(1000)], args.total_train_instances
)
elif args.split == "validation":
# use pre-specified number
args.total_train_instances = 115
prompt_seeds = random.sample(
[i for i in range(1000)], args.total_train_instances
)
if not args.testing:
# create a save directory
if not os.path.exists("./gpt3_outputs/"):
os.mkdir("./gpt3_outputs/")
save_path = f"./gpt3_outputs/{args.dataset}/"
save_dir = os.path.join(save_path, datetime.now().strftime("%m%d%y_%H%M%S"))
assert os.path.exists(save_path)
assert not os.path.exists(save_dir)
os.makedirs(save_dir)
# get git hash and branch where deployed
repo = git.Repo(search_parent_directories=True)
git_hash = repo.head.object.hexsha
git_branch = repo.active_branch.name
# write command to logfile
with open(os.path.join(save_dir, "commandline_args.txt"), "w") as f:
f.write("Git branch: " + git_branch + "\n")
f.write("Git hash: " + git_hash + "\n")
for key in args.__dict__:
f.write(f"{key}: {args.__dict__[key]}\n")
outfile = os.path.join(save_dir, "generations.csv")
if args.get_shuffled_examples:
outfile_shuff = os.path.join(save_dir, "shuffled_generations.csv")
g = open(outfile_shuff, "w")
shuff_writer = csv.writer(g)
shuff_writer.writerow(
[
"id",
"question",
"answer_choices",
"gold_label",
"source_sample_1",
"source_sample_2",
"nll_1",
"nll_2",
"explanation_1",
"explanation_2",
]
)
if args.prime_source == "dataset" and args.dataset == "ecqa":
ecqa_expls = {}
# load ECQA explanations from file
with jsonlines.open("./ecqa.jsonl") as f:
dataset = [obj for obj in f]
for it in dataset:
ecqa_expls[it["id"]] = it["explanation"]
# load full train+validation prime set
primes = []
prime_ids = []
with open(f"../data/handwritten_cose_v1.11_examples.csv", "r") as f:
reader = csv.DictReader(f)
for line in reader:
prime_ids.append(line["id"])
if args.prime_source == "dataset" and args.dataset == "cose_v1.11":
# load dataset explanations for primes
expl = line["orig_explanation"]
elif args.prime_source == "dataset" and args.dataset == "ecqa":
# get associated ECQA explanation
expl = ecqa_expls[line["id"]].replace("\n", " ")
else:
# load our hand-written explanations for primes
expl = line["our_explanation"]
primes.append(
{
"abstractive_explanation": expl,
"choices": ast.literal_eval(line["choices"]),
"answer": line["answer"],
"question": line["question"],
}
)
assert len(primes) == 115
if args.split == "test":
# load "test" set (that also has gold explanations)
ds = nlp.load_dataset("cos_e", "v1.11", split="validation")
ds = ds.shuffle(seed=10)
# select random subset of 250 instances
splits = ds.train_test_split(test_size=(250 / len(ds)))
dataset = splits["test"]
assert len(dataset) == 250
elif args.split == "validation":
# doing prompting LOO-style on primes themselves
dataset = primes
elif args.split == "train":
ds = nlp.load_dataset("cos_e", "v1.11", split="train[15:]")
ds = ds.shuffle(seed=10)
# select random subset of args.total_train_instances size
dataset = []
i = 0
while len(dataset) < args.total_train_instances:
# add elements
if ds[i]["id"] not in prime_ids:
dataset.append(ds[i])
i += 1
assert len(dataset) == args.total_train_instances
# write out for annotation
with open(outfile, "w") as f:
writer = csv.writer(f)
writer.writerow(
[
"id",
"sample_number",
"question",
"answer_choices",
"gold_label",
"predicted_label",
"gold_explanation",
"predicted_explanation",
"prediction_nll",
"num_primes",
"prompt",
]
)
expl_pairs = []
acc = []
for i, (new_seed, inst) in tqdm(
enumerate(zip(prompt_seeds, dataset)), total=len(dataset)
):
instance_pairs = []
# select prime indices
random.seed(new_seed)
if args.dataset == "ecqa":
# 24 ECQA primes is sometimes too long for the API context window (2049 tokens)
num_primes = random.choice([8, 16])
else:
num_primes = random.choice([8, 16, 24])
if args.split == "validation":
# remove self from primes list
partial_list = copy.deepcopy(primes)
partial_list.remove(inst)
selected_primes = random.sample(partial_list, num_primes)
else:
# select from all primes
selected_primes = random.sample(primes, num_primes)
random.shuffle(selected_primes)
# construct prompt shared by all variants of this instance
if args.task_type == "labelOnly":
prompt = "Let's perform a classification task.\n\n"
for item in selected_primes:
# shuffle answer choices
ch = random.sample(item["choices"], len(item["choices"]))
prompt += f"question: {item['question']}\n{', '.join(ch[:-1]) + ', or ' + ch[-1]}? {item['answer']}\n###\n"
elif args.task_type in {"explanationOnly", "jointBoth"}:
prompt = "Let's explain classification decisions.\n\n"
for item in selected_primes:
# shuffle answer choices
ch = random.sample(item["choices"], len(item["choices"]))
prompt += f"question: {item['question']}\n{', '.join(ch[:-1]) + ', or ' + ch[-1]}? {item['answer']}\nwhy? {item['abstractive_explanation']}\n###\n"
quest = inst["question"]
gold_e = inst["abstractive_explanation"]
gold_l = inst["answer"]
# shuffle answer choices
choices = random.sample(inst["choices"], len(inst["choices"]))
choice_string = ", ".join(choices[:-1]) + ", or " + choices[-1]
# reset seed (for good measure)
random.seed(args.random_seed)
# append instance to prompt
if args.task_type in {"labelOnly", "jointBoth"}:
prompt_list = [prompt + f"question: {quest}\n{choice_string}?"]
elif args.task_type == "explanationOnly":
prompt_list = [
prompt + f"question: {quest}\n{choice_string}? {gold_l}\nwhy?"
]
else:
raise Exception
if i == 0:
print("#######################")
print(prompt_list[0])
print("#######################")
assert len(prompt_list) == 1
for custom_prompt in prompt_list:
if args.num_samples > 1:
# perform GPT-3 inference to get predicted label and explanation
response = complete_gpt3(custom_prompt, args)
# iterates over n returned generations
for j, choice in enumerate(response["choices"]):
if args.task_type
2, 1, 9, 7],
[2, 5, 7, 9, 4, 8, 5, 7],
[6, 8, 10, 0, 4, 9, 8, 10],
],
dtype=np.float32,
)
elif flow.env.get_rank() == 3:
np_arr = np.array(
[
[9, 4, 5, 8, 9, 6, 5, 8],
[7, 2, 9, 5, 4, 1, 7, 2],
[6, 3, 9, 2, 5, 2, 9, 2],
[3, 7, 5, 8, 9, 3, 7, 5],
],
dtype=np.float32,
)
device = flow.device(in_device)
tensor = flow.tensor(np_arr, device=device, dtype=flow.float32)
placement = flow.placement(in_device, {0: [0, 1]})
x = tensor.to_consistent(placement, flow.sbp.split(0))
y = x.to_consistent(placement, flow.sbp.split(1))
new_placement = flow.placement(out_device, {0: [0, 1, 2, 3]})
z = y.to_consistent(new_placement, flow.sbp.split(1))
test_case.assertEqual(z.placement, new_placement)
if flow.env.get_rank() == 0:
test_case.assertTrue(
np.array_equal(
z.to_local().numpy(),
np.array(
[[4, 6], [6, 8], [3, 7], [6, 8], [2, 10], [3, 9], [4, 6], [6, 8],],
dtype=np.float32,
),
)
)
elif flow.env.get_rank() == 1:
test_case.assertTrue(
np.array_equal(
z.to_local().numpy(),
np.array(
[
[5, 20],
[9, 0],
[5, 0],
[9, 0],
[10, 7],
[10, 5],
[6, 9],
[6, 4],
],
dtype=np.float32,
),
)
)
elif flow.env.get_rank() == 2:
test_case.assertTrue(
np.array_equal(
z.to_local().numpy(),
np.array(
[[8, 9], [4, 6], [3, 5], [8, 7], [10, 3], [5, 6], [8, 6], [5, 3],],
dtype=np.float32,
),
)
)
elif flow.env.get_rank() == 3:
test_case.assertTrue(
np.array_equal(
z.to_local().numpy(),
np.array(
[
[5, 20],
[9, 0],
[0, 3],
[8, 9],
[10, 7],
[9, 10],
[6, 9],
[8, 6],
],
dtype=np.float32,
),
)
)
def _test_eager_boxing_with_same_placement_p_to_s1(test_case, in_device, out_device):
if flow.env.get_rank() == 0:
np_arr = np.array(
[
[4, 6, 5, 20, 8, 9],
[6, 8, 9, 0, 4, 6],
[3, 7, 5, 0, 3, 5],
[6, 8, 9, 0, 8, 7],
[6, 8, 9, 0, 4, 6],
[6, 8, 6, 4, 5, 3],
],
dtype=np.float32,
)
elif flow.env.get_rank() == 1:
np_arr = np.array(
[
[2, 10, 10, 7, 10, 3],
[3, 9, 10, 5, 5, 6],
[4, 6, 6, 9, 8, 6],
[6, 8, 6, 4, 5, 3],
[4, 9, 7, 0, 2, 1],
[6, 3, 9, 2, 5, 2],
],
dtype=np.float32,
)
elif flow.env.get_rank() == 2:
np_arr = np.array(
[
[9, 6, 5, 8, 3, 6],
[4, 9, 7, 0, 2, 1],
[2, 5, 7, 9, 4, 8],
[6, 8, 10, 0, 4, 9],
[6, 3, 9, 2, 5, 2],
[2, 5, 7, 9, 4, 8],
],
dtype=np.float32,
)
elif flow.env.get_rank() == 3:
np_arr = np.array(
[
[9, 4, 5, 8, 9, 6],
[7, 2, 9, 5, 4, 1],
[6, 3, 9, 2, 5, 2],
[3, 7, 5, 8, 9, 3],
[7, 2, 9, 5, 4, 1],
[4, 9, 7, 0, 2, 1],
],
dtype=np.float32,
)
device = flow.device(in_device)
tensor = flow.tensor(np_arr, device=device, dtype=flow.float32)
placement = flow.placement(in_device, {0: [0, 1, 3]})
x = tensor.to_consistent(placement, flow.sbp.partial_sum)
y = x.to_consistent(placement, flow.sbp.split(1))
test_case.assertEqual(y.placement, placement)
if flow.env.get_rank() == 0:
test_case.assertTrue(
np.array_equal(
y.to_local().numpy(),
np.array(
[[15, 20], [16, 19], [13, 16], [15, 23], [17, 19], [16, 20],],
dtype=np.float32,
),
)
)
if flow.env.get_rank() == 1:
test_case.assertTrue(
np.array_equal(
y.to_local().numpy(),
np.array(
[[20, 35], [28, 10], [20, 11], [20, 12], [25, 5], [22, 6],],
dtype=np.float32,
),
)
)
if flow.env.get_rank() == 3:
test_case.assertTrue(
np.array_equal(
y.to_local().numpy(),
np.array(
[[27, 18], [13, 13], [16, 13], [22, 13], [10, 8], [12, 6],],
dtype=np.float32,
),
)
)
def _test_eager_boxing_with_same_placement_b_to_s1(test_case, in_device, out_device):
if flow.env.get_rank() == 0:
np_arr = np.array(
[
[4, 6, 5, 20, 8, 9],
[6, 8, 9, 0, 4, 6],
[3, 7, 5, 0, 3, 5],
[6, 8, 9, 0, 8, 7],
[6, 8, 9, 0, 4, 6],
[6, 8, 6, 4, 5, 3],
],
dtype=np.float32,
)
elif flow.env.get_rank() == 1:
np_arr = np.array(
[
[2, 10, 10, 7, 10, 3],
[3, 9, 10, 5, 5, 6],
[4, 6, 6, 9, 8, 6],
[6, 8, 6, 4, 5, 3],
[4, 9, 7, 0, 2, 1],
[6, 3, 9, 2, 5, 2],
],
dtype=np.float32,
)
elif flow.env.get_rank() == 2:
np_arr = np.array(
[
[9, 6, 5, 8, 3, 6],
[4, 9, 7, 0, 2, 1],
[2, 5, 7, 9, 4, 8],
[6, 8, 10, 0, 4, 9],
[6, 3, 9, 2, 5, 2],
[2, 5, 7, 9, 4, 8],
],
dtype=np.float32,
)
elif flow.env.get_rank() == 3:
np_arr = np.array(
[
[9, 4, 5, 8, 9, 6],
[7, 2, 9, 5, 4, 1],
[6, 3, 9, 2, 5, 2],
[3, 7, 5, 8, 9, 3],
[7, 2, 9, 5, 4, 1],
[4, 9, 7, 0, 2, 1],
],
dtype=np.float32,
)
device = flow.device(in_device)
tensor = flow.tensor(np_arr, device=device, dtype=flow.float32)
placement = flow.placement(in_device, {0: [0, 1, 3]})
x = tensor.to_consistent(placement, flow.sbp.broadcast)
y = x.to_consistent(placement, flow.sbp.split(1))
test_case.assertEqual(y.placement, placement)
if flow.env.get_rank() == 0:
test_case.assertTrue(
np.array_equal(
y.to_local().numpy(),
np.array(
[[4, 6], [6, 8], [3, 7], [6, 8], [6, 8], [6, 8],], dtype=np.float32,
),
)
)
if flow.env.get_rank() == 1:
test_case.assertTrue(
np.array_equal(
y.to_local().numpy(),
np.array(
[[5, 20], [9, 0], [5, 0], [9, 0], [9, 0], [6, 4],],
dtype=np.float32,
),
)
)
if flow.env.get_rank() == 3:
test_case.assertTrue(
np.array_equal(
y.to_local().numpy(),
np.array(
[[8, 9], [4, 6], [3, 5], [8, 7], [4, 6], [5, 3],], dtype=np.float32,
),
)
)
def _test_eager_boxing_with_same_placement_s0_to_s1(test_case, in_device, out_device):
if flow.env.get_rank() == 0:
np_arr = np.array(
[
[4, 6, 5, 20, 8, 9],
[6, 8, 9, 0, 4, 6],
[3, 7, 5, 0, 3, 5],
[6, 8, 9, 0, 8, 7],
],
dtype=np.float32,
)
elif flow.env.get_rank() == 1:
np_arr = np.array(
[
[2, 10, 10, 7, 10, 3],
[3, 9, 10, 5, 5, 6],
[4, 6, 6, 9, 8, 6],
[6, 8, 6, 4, 5, 3],
],
dtype=np.float32,
)
elif flow.env.get_rank() == 2:
np_arr = np.array(
[
[9, 6, 5, 8, 3, 6],
[4, 9, 7, 0, 2, 1],
[2, 5, 7, 9, 4, 8],
[6, 8, 10, 0, 4, 9],
],
dtype=np.float32,
)
elif flow.env.get_rank() == 3:
np_arr = np.array(
[
[9, 4, 5, 8, 9, 6],
[7, 2, 9, 5, 4, 1],
[6, 3, 9, 2, 5, 2],
[3, 7, 5, 8, 9, 3],
],
dtype=np.float32,
)
device = flow.device(in_device)
tensor = flow.tensor(np_arr, device=device, dtype=flow.float32)
placement = flow.placement(in_device, {0: [0, 1, 3]})
x = tensor.to_consistent(placement, flow.sbp.split(0))
y = x.to_consistent(placement, flow.sbp.split(1))
test_case.assertEqual(y.placement, placement)
if flow.env.get_rank() == 0:
test_case.assertTrue(
np.array_equal(
y.to_local().numpy(),
np.array(
[
[4, 6],
[6, 8],
[3, 7],
[6, 8],
[2, 10],
[3, 9],
[4, 6],
[6, 8],
[9, 4],
[7, 2],
[6, 3],
[3, 7],
],
dtype=np.float32,
),
)
)
if flow.env.get_rank() == 1:
test_case.assertTrue(
np.array_equal(
y.to_local().numpy(),
np.array(
[
[5, 20],
[9, 0],
[5, 0],
[9, 0],
[10, 7],
[10, 5],
[6, 9],
[6, 4],
[5, 8],
[9, 5],
[9, 2],
[5, 8],
],
dtype=np.float32,
),
)
)
if flow.env.get_rank() == 3:
test_case.assertTrue(
np.array_equal(
y.to_local().numpy(),
np.array(
[
[8, 9],
[4, 6],
[3, 5],
[8, 7],
[10, 3],
[5, 6],
[8, 6],
[5, 3],
[9, 6],
[4, 1],
[5, 2],
[9, 3],
],
dtype=np.float32,
),
)
)
def _test_eager_boxing_with_same_placement_s1_to_s1(test_case, in_device, out_device):
if flow.env.get_rank() == 0:
np_arr = np.array(
[
[4, 6, 5, 20, 8, 9],
[6, 8, 9, 0, 4, 6],
[3, 7, 5, 0, 3, 5],
[6, 8, 9, 0, 8, 7],
],
dtype=np.float32,
)
elif flow.env.get_rank() == 1:
np_arr = np.array(
[
[2, 10, 10, 7, 10, 3],
[3, 9, 10, 5, 5, 6],
[4, 6, 6, 9, 8, 6],
[6, 8, 6, 4, 5, 3],
],
dtype=np.float32,
)
elif flow.env.get_rank() == 2:
np_arr = np.array(
[
[9, 6, 5, 8, 3, 6],
[4, 9, 7, 0, 2, 1],
[2, 5, 7, 9, 4, 8],
[6, 8, 10, 0, 4, 9],
],
dtype=np.float32,
)
elif flow.env.get_rank() == 3:
np_arr = np.array(
[
[9, 4, 5, 8, 9, 6],
[7, 2, 9, 5, 4, 1],
[6, 3, 9, 2, 5, 2],
[3, 7, 5, 8, 9, 3],
],
dtype=np.float32,
)
device = flow.device(in_device)
tensor = flow.tensor(np_arr, device=device, dtype=flow.float32)
placement = flow.placement(in_device, {0: [0, 1, 3]})
x = tensor.to_consistent(placement, flow.sbp.split(0))
y = x.to_consistent(placement, flow.sbp.split(1))
z = y.to_consistent(placement, flow.sbp.split(1))
test_case.assertEqual(z.placement, placement)
if flow.env.get_rank() == 0:
test_case.assertTrue(
np.array_equal(
z.to_local().numpy(),
np.array(
[
[4, 6],
[6, 8],
[3, 7],
[6, 8],
[2, 10],
[3, 9],
[4, 6],
[6, 8],
[9, 4],
[7, 2],
[6, 3],
[3,
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, TransformerDecodingState
import onmt
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
from onmt.modules.dropout import embedded_dropout
from onmt.modules.sinusoidal_positional_encoding import SinusoidalPositionalEmbedding, FastSinusoidalPositionalEncoding
from onmt.models.transformer_layers import PrePostProcessing
from .relative_transformer_layers import RelativeTransformerEncoderLayer, RelativeTransformerDecoderLayer
from .reversible_transformers import ReversibleTransformerEncoderLayer, reversible_encoder
from .reversible_transformers import ReversibleTransformerDecoderLayer, reversible_decoder
from onmt.modules.identity import Identity
from onmt.utils import flip, expected_length
from collections import defaultdict
import math
import sys
from torch.utils.checkpoint import checkpoint
torch.set_printoptions(threshold=500000)
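# Helper used for activation checkpointing below: torch.utils.checkpoint.checkpoint expects a
# plain callable, so the module's forward pass is wrapped in a closure (see the encoder/decoder
# layer loops where opt.checkpointing > 0).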
def create_forward_function(module):
def forward_pass(*inputs):
return module(*inputs)
return forward_pass
class RelativeTransformerEncoder(TransformerEncoder):
def __init__(self, opt, dicts, positional_encoder, encoder_type='text', language_embeddings=None):
self.death_rate = opt.death_rate
self.learnable_position_encoding = opt.learnable_position_encoding
self.layer_modules = list()
self.unidirectional = opt.unidirectional
self.n_heads = opt.n_heads
self.n_languages = opt.n_languages
self.checkpointing = opt.checkpointing
self.absolute_position_encoding = opt.absolute_position_encoding
self.early_emb_scale = opt.encoder_early_emb_scale
self.learnable_position_encoding = opt.learnable_position_encoding
self.max_pos_length = opt.max_pos_length
self.reversible = opt.src_reversible
# build_modules will be called from the inherited constructor
super(RelativeTransformerEncoder, self).__init__(opt, dicts, positional_encoder, encoder_type,
language_embeddings)
if not self.early_emb_scale and (self.use_language_embedding or self.absolute_position_encoding):
print("[INFO] Embedding will be scaled after being added with embedding and position encoding."
"\n[INFO] For multilingual models its advisable to use -encoder_early_emb_scale")
# learnable position encoding
if self.learnable_position_encoding:
self.positional_encoder = None
else:
if not self.absolute_position_encoding:
# or using pre-set sinusoidal
self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
else:
self.positional_encoder = FastSinusoidalPositionalEncoding(opt.model_size)
if opt.rezero or opt.post_norm:
self.postprocess_layer = Identity()
self.d_head = self.model_size // self.n_heads
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
if self.reversible:
print("* Relative Reversible Encoder with %.2f expected layers" % e_length)
else:
print("* Relative Translation Encoder with %.2f expected layers" % e_length)
self.layer_modules = nn.ModuleList()
for _l in range(self.layers):
# linearly decay the death rate
death_r = (_l + 1.0) / self.layers * self.death_rate
if self.reversible:
block = ReversibleTransformerEncoderLayer(self.opt, death_rate=death_r)
else:
block = RelativeTransformerEncoderLayer(self.opt, death_rate=death_r)
self.layer_modules.append(block)
def forward(self, input, input_pos=None, input_lang=None, streaming=False, **kwargs):
"""
Inputs Shapes:
input: batch_size x src_len (will be transposed)
Outputs Shapes:
out: batch_size x src_len x d_model
mask_src
"""
""" Embedding: batch_size x src_len x d_model """
bsz_first_input = input
input = input.transpose(0, 1)
dec_attn_mask = bsz_first_input.eq(onmt.constants.PAD).unsqueeze(1)
mem_len = 0
mems = None
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
if self.early_emb_scale:
""" Scale the emb by sqrt(d_model) """
emb = emb * math.sqrt(self.model_size)
""" Adding language embeddings """
if self.use_language_embedding:
assert self.language_embedding is not None
# There is no "unsqueeze" here because the input is T x B x H and lang_emb is B x H
if self.language_embedding_type in ['sum', 'all_sum']:
lang_emb = self.language_embedding(input_lang)
emb = emb + lang_emb.unsqueeze(0)
""" Adding positional encoding """
qlen = input.size(0)
klen = qlen + mem_len
# Asynchronous positions: 2K+1 positions instead of K+1
if not self.absolute_position_encoding:
if not self.learnable_position_encoding:
pos = torch.arange(klen - 1, -klen, -1.0, device=emb.device, dtype=emb.dtype)
# pos_emb has size 2T+1 x 1 x H
pos_emb = self.positional_encoder(pos, bsz=input.size(1))
pos_emb = self.preprocess_layer(pos_emb)
else:
range_vec = torch.arange(klen, device=emb.device)
range_mat = range_vec.unsqueeze(-1).expand(-1, klen).transpose(0, 1)
distance_mat = range_vec - range_mat.transpose(0, 1)
distance_mat.clamp_(-self.max_pos_length, self.max_pos_length).add_(self.max_pos_length)
pos_emb = distance_mat
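# Illustrative sketch (added comment, not original code): for klen == 3, distance_mat[i][j] == j - i:
#   [[ 0,  1,  2],
#    [-1,  0,  1],
#    [-2, -1,  0]]
# clamping to [-max_pos_length, max_pos_length] and adding max_pos_length maps these signed
# offsets to non-negative indices into the learnable relative-position table.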
# pos = torch.arange(klen - 1, -klen, -1.0, device=emb.device).long()
# pos.clamp_(-self.max_pos_length, self.max_pos_length).add_(self.max_pos_length)
# pos_emb = pos.unsqueeze(1)
mask_src = input.eq(onmt.constants.PAD).unsqueeze(0) # 1 x src_len x batch_size for broadcasting
else:
# Absolute position encoding from 0 -> n
pos, pos_emb = None, None
emb = self.positional_encoder(emb.transpose(0, 1)).transpose(0, 1)
mask_src = bsz_first_input.eq(onmt.constants.PAD) # batch_size x src_len
if onmt.constants.torch_version >= 1.2:
mask_src = mask_src.bool()
if not self.early_emb_scale:
""" Scale the emb by sqrt(d_model) """
emb = emb * math.sqrt(self.model_size)
# context size is now T x B x H
context = self.preprocess_layer(emb)
if self.reversible:
context = reversible_encoder(self.layer_modules, context, pos_emb, mask_src)
else:
for i, layer in enumerate(self.layer_modules):
# src_len x batch_size x d_model
if self.checkpointing == 0 or self.training is False:
context = layer(context, pos_emb, mask_src, src_lang=input_lang)
else:
context = checkpoint(create_forward_function(layer), context, pos_emb, mask_src, input_lang)
# final layer norm. we can consider this layer norm as a part of the output layer/function
context = self.postprocess_layer(context)
output_dict = defaultdict(lambda: None, {'context': context, 'src_mask': dec_attn_mask, 'src': input})
return output_dict
class RelativeTransformerDecoder(TransformerDecoder):
def __init__(self, opt, dicts, positional_encoder, language_embeddings=None, ignore_source=False):
self.death_rate = opt.death_rate
self.n_heads = opt.n_heads
self.checkpointing = opt.checkpointing
self.absolute_position_encoding = opt.absolute_position_encoding
self.late_emb_scale = opt.decoder_late_emb_scale
self.learnable_position_encoding = opt.learnable_position_encoding
self.max_pos_length = opt.max_pos_length
self.reversible = opt.tgt_reversible
# build_modules will be called from the inherited constructor
super(RelativeTransformerDecoder, self).__init__(opt, dicts,
positional_encoder,
language_embeddings,
ignore_source,
allocate_positions=False)
if self.learnable_position_encoding:
self.positional_encoder = None
else:
if not self.absolute_position_encoding:
# or using pre-set sinusoidal
self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
else:
self.positional_encoder = FastSinusoidalPositionalEncoding(opt.model_size)
self.d_head = self.model_size // self.n_heads
if opt.rezero or opt.post_norm:
self.postprocess_layer = Identity()
def renew_buffer(self, new_len):
return
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
self.opt.ignore_source = self.ignore_source
if self.reversible:
print("* Transformer Reversible Decoder with Relative Attention with %.2f expected layers" % e_length)
else:
print("* Transformer Decoder with Relative Attention with %.2f expected layers" % e_length)
self.layer_modules = nn.ModuleList()
for l in range(self.layers):
# linearly decay the death rate
death_r = (l + 1.0) / self.layers * self.death_rate
if not self.reversible:
block = RelativeTransformerDecoderLayer(self.opt, death_rate=death_r)
else:
block = ReversibleTransformerDecoderLayer(self.opt)
self.layer_modules.append(block)
def process_embedding(self, input, input_lang=None):
return input
# TODO: merging forward_stream and forward
# TODO: write a step function for encoder
def forward(self, input, context, src, input_pos=None, src_lang=None, tgt_lang=None,
streaming=False, **kwargs):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (will be transposed)
context: (Variable) batch_size x src_len x d_model
mask_src (Tensor) batch_size x src_len
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x src_len
"""
""" Embedding: batch_size x len_tgt x d_model """
input = input.transpose(0, 1) # T x B
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
if not self.late_emb_scale:
emb = emb * math.sqrt(self.model_size)
mem_len = 0
mems = None
extra_context = None
if self.use_language_embedding:
lang_emb = self.language_embeddings(tgt_lang) # B x H or 1 x H
if self.language_embedding_type == 'sum':
emb = emb + lang_emb
elif self.language_embedding_type == 'concat':
lang_emb = lang_emb.unsqueeze(0).expand_as(emb)
concat_emb = torch.cat([emb, lang_emb], dim=-1)
emb = torch.relu(self.projector(concat_emb))
else:
raise NotImplementedError
if context is not None:
mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = None
qlen = input.size(0)
klen = qlen + mem_len
# preparing self-attention mask. The input is left aligned so we do not need to add the pad mask
dec_attn_mask = torch.triu(
emb.new_ones(qlen, klen), diagonal=1 + mem_len).byte()[:, :, None]
# pad_mask = input.eq(onmt.constants.PAD).byte() # L x B
#
# dec_attn_mask = dec_attn_mask + pad_mask.unsqueeze(0)
# dec_attn_mask = dec_attn_mask.gt(0)
dec_attn_mask = dec_attn_mask.bool()
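# Illustrative sketch (added comment, not original code): with qlen == klen == 3,
# torch.triu(ones(3, 3), diagonal=1) is
#   [[0, 1, 1],
#    [0, 0, 1],
#    [0, 0, 0]]
# so position i can only attend to positions j <= i; future tokens are masked out.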
if not self.absolute_position_encoding:
# relative positions
if not self.learnable_position_encoding:
pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
pos_emb = self.positional_encoder(pos, bsz=input.size(1))
pos_emb = self.preprocess_layer(pos_emb)
else:
range_vec = torch.arange(klen, device=emb.device)
range_mat = range_vec.unsqueeze(-1).expand(-1, klen).transpose(0, 1)
distance_mat = range_vec - range_mat.transpose(0, 1)
distance_mat.clamp_(-self.max_pos_length, self.max_pos_length).add_(self.max_pos_length)
pos_emb = distance_mat
# pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype).long()
# pos.clamp_(-self.max_pos_length, self.max_pos_length).add_(self.max_pos_length)
# pos_emb = pos.unsqueeze(1)
else:
# absolute positions
emb = self.positional_encoder(emb.transpose(0, 1)).transpose(0, 1)
pos, pos_emb = None, None
dec_attn_mask = dec_attn_mask.squeeze(-1)
if self.late_emb_scale:
emb = emb * math.sqrt(self.model_size)
output = self.preprocess_layer(emb.contiguous())
if self.reversible:
# TODO: add src lang and tgt lang to reversible
output, coverage = reversible_decoder(self.layer_modules, output, pos_emb, context,
dec_attn_mask.squeeze(-1), mask_src,
False, None) # incremental variables
else:
for i, layer in enumerate(self.layer_modules):
if self.checkpointing == 0 or self.training is False:
output, coverage, _ = layer(output, context, pos_emb, dec_attn_mask, mask_src,
src_lang=src_lang, tgt_lang=tgt_lang)
else:
output, coverage, _ = checkpoint(create_forward_function(layer), output, context, pos_emb,
dec_attn_mask,
mask_src, src_lang, tgt_lang)
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
output = self.postprocess_layer(output)
output_dict = {'hidden': output, 'coverage': coverage, 'context': context}
output_dict = defaultdict(lambda: None, output_dict)
return output_dict
def step(self, input, decoder_state, streaming=False):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (will be transposed)
context: (Variable) batch_size x src_len x d_model
mask_src (Tensor) batch_size x src_len
buffer (List of tensors) List of batch_size * len_tgt-1 * d_model for self-attention recomputing
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x src_len
"""
context = decoder_state.context
buffers = decoder_state.attention_buffers
lang = decoder_state.tgt_lang
src_lang = decoder_state.src_lang
buffering = decoder_state.buffering
if decoder_state.concat_input_seq:
if decoder_state.input_seq is None:
decoder_state.input_seq
target, GLenum format, GLenum type, void *row, void *column, void *span);
void glDeleteProgram(GLuint program);
void glColor4bv(const GLbyte *v);
void glRasterPos2f(GLfloat x, GLfloat y);
void glNamedBufferPageCommitmentARB(GLuint buffer, GLintptr offset, GLsizeiptr size, GLboolean commit);
void glLoadIdentity();
void glProgramParameter4dNV(GLenum target, GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
void glRasterPos4iv(const GLint *v);
void glUniformMatrix4x3fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glClearBufferfv(GLenum buffer, GLint drawbuffer, const GLfloat *value);
void glVertexPointerEXT(GLint size, GLenum type, GLsizei stride, GLsizei count, const void *pointer);
void glMultiTexCoord4hNV(GLenum target, GLhalfNV s, GLhalfNV t, GLhalfNV r, GLhalfNV q);
void glTextureBarrier();
void glAlphaFragmentOp3ATI(GLenum op, GLuint dst, GLuint dstMod, GLuint arg1, GLuint arg1Rep, GLuint arg1Mod, GLuint arg2, GLuint arg2Rep, GLuint arg2Mod, GLuint arg3, GLuint arg3Rep, GLuint arg3Mod);
void glDeleteAsyncMarkersSGIX(GLuint marker, GLsizei range);
void glTexCoord2bvOES(const GLbyte *coords);
void glEvalCoord2xOES(GLfixed u, GLfixed v);
void glGetSharpenTexFuncSGIS(GLenum target, GLfloat *points);
void glDebugMessageInsertKHR(GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar *buf);
void glIglooInterfaceSGIX(GLenum pname, const void *params);
void glClearBufferfi(GLenum buffer, GLint drawbuffer, GLfloat depth, GLint stencil);
void glDrawArraysIndirect(GLenum mode, const void *indirect);
void glGenVertexArrays(GLsizei n, GLuint *arrays);
void glEnableVertexArrayAttrib(GLuint vaobj, GLuint index);
void glProgramUniformMatrix3x2dv(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
void glVertexBindingDivisor(GLuint bindingindex, GLuint divisor);
void glGetSamplerParameterIiv(GLuint sampler, GLenum pname, GLint *params);
void glGetCoverageModulationTableNV(GLsizei bufsize, GLfloat *v);
void glUniformMatrix4x2fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glVertexAttrib3f(GLuint index, GLfloat x, GLfloat y, GLfloat z);
void glMultiDrawRangeElementArrayAPPLE(GLenum mode, GLuint start, GLuint end, const GLint *first, const GLsizei *count, GLsizei primcount);
void glVertexAttribFormatNV(GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride);
void glGetQueryBufferObjecti64v(GLuint id, GLuint buffer, GLenum pname, GLintptr offset);
void glGetVertexAttribdv(GLuint index, GLenum pname, GLdouble *params);
void glStartInstrumentsSGIX();
GLboolean glAreTexturesResidentEXT(GLsizei n, const GLuint *textures, GLboolean *residences);
void glGetFloatIndexedvEXT(GLenum target, GLuint index, GLfloat *data);
void glVideoCaptureStreamParameterdvNV(GLuint video_capture_slot, GLuint stream, GLenum pname, const GLdouble *params);
void glMapParameterivNV(GLenum target, GLenum pname, const GLint *params);
void glSecondaryColor3sEXT(GLshort red, GLshort green, GLshort blue);
void glGetTexParameterIivEXT(GLenum target, GLenum pname, GLint *params);
void glFrameTerminatorGREMEDY();
void glBlendBarrierKHR();
void glVertexAttrib4NubARB(GLuint index, GLubyte x, GLubyte y, GLubyte z, GLubyte w);
void glPrimitiveRestartNV();
void glProgramNamedParameter4fvNV(GLuint id, GLsizei len, const GLubyte *name, const GLfloat *v);
void glVertexAttribL1ui64vARB(GLuint index, const GLuint64EXT *v);
void glPointParameterfvARB(GLenum pname, const GLfloat *params);
void glVertexAttribs4svNV(GLuint index, GLsizei count, const GLshort *v);
void glUniform1ui(GLint location, GLuint v0);
void glVertexAttrib2fvARB(GLuint index, const GLfloat *v);
void glBlitNamedFramebuffer(GLuint readFramebuffer, GLuint drawFramebuffer, GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
void glVertexAttrib3d(GLuint index, GLdouble x, GLdouble y, GLdouble z);
void glTextureImage3DMultisampleCoverageNV(GLuint texture, GLenum target, GLsizei coverageSamples, GLsizei colorSamples, GLint internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedSampleLocations);
void glMemoryBarrier(GLbitfield barriers);
void glGetVariantArrayObjectfvATI(GLuint id, GLenum pname, GLfloat *params);
void glTexCoord4fColor4fNormal3fVertex4fvSUN(const GLfloat *tc, const GLfloat *c, const GLfloat *n, const GLfloat *v);
GLboolean glIsProgramARB(GLuint program);
void glBindImageTextureEXT(GLuint index, GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum access, GLint format);
void glSampleCoveragexOES(GLclampx value, GLboolean invert);
GLint glGetFragDataLocation(GLuint program, const GLchar *name);
void glMultiTexCoord1svARB(GLenum target, const GLshort *v);
void glGetMapxvOES(GLenum target, GLenum query, GLfixed *v);
void glTextureStorage2DMultisampleEXT(GLuint texture, GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations);
void glGetMaterialfv(GLenum face, GLenum pname, GLfloat *params);
void glVertexAttrib4NbvARB(GLuint index, const GLbyte *v);
void glPixelMapuiv(GLenum map, GLsizei mapsize, const GLuint *values);
void glColorPointerEXT(GLint size, GLenum type, GLsizei stride, GLsizei count, const void *pointer);
void glReplacementCodeuiColor4fNormal3fVertex3fSUN(GLuint rc, GLfloat r, GLfloat g, GLfloat b, GLfloat a, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z);
void glEnableClientStateiEXT(GLenum array, GLuint index);
void glClearTexSubImage(GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *data);
void glEvalCoord1xvOES(const GLfixed *coords);
void glDetachObjectARB(GLhandleARB containerObj, GLhandleARB attachedObj);
void glGetTextureParameterIiv(GLuint texture, GLenum pname, GLint *params);
void glVariantusvEXT(GLuint id, const GLushort *addr);
void glCompressedTextureImage3DEXT(GLuint texture, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *bits);
void glMultiTexCoord1dARB(GLenum target, GLdouble s);
void glGetVertexArrayIntegeri_vEXT(GLuint vaobj, GLuint index, GLenum pname, GLint *param);
void glVertexAttribI4ubv(GLuint index, const GLubyte *v);
void glPixelTexGenParameterfSGIS(GLenum pname, GLfloat param);
void glProgramUniformMatrix4x2dv(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
void glUniform3fARB(GLint location, GLfloat v0, GLfloat v1, GLfloat v2);
GLboolean glIsShader(GLuint shader);
void glConvolutionParameteriv(GLenum target, GLenum pname, const GLint *params);
void glCopyMultiTexSubImage2DEXT(GLenum texunit, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height);
void glEnableVertexAttribArrayARB(GLuint index);
void glEnable(GLenum cap);
void glGetActiveUniformsiv(GLuint program, GLsizei uniformCount, const GLuint *uniformIndices, GLenum pname, GLint *params);
void glEdgeFlagPointerListIBM(GLint stride, const GLboolean **pointer, GLint ptrstride);
void glTexCoord4hvNV(const GLhalfNV *v);
void glBlendEquationi(GLuint buf, GLenum mode);
GLint glGetAttribLocation(GLuint program, const GLchar *name);
void glVertexAttrib4dv(GLuint index, const GLdouble *v);
void glGetTextureParameteriv(GLuint texture, GLenum pname, GLint *params);
void glPathSubCoordsNV(GLuint path, GLsizei coordStart, GLsizei numCoords, GLenum coordType, const void *coords);
void glMatrixIndexusvARB(GLint size, const GLushort *indices);
void glGetVideouivNV(GLuint video_slot, GLenum pname, GLuint *params);
void glNamedProgramLocalParametersI4uivEXT(GLuint program, GLenum target, GLuint index, GLsizei count, const GLuint *params);
void glProgramUniform3ui(GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2);
void glVertexAttrib3svARB(GLuint index, const GLshort *v);
void glGetNamedBufferParameterivEXT(GLuint buffer, GLenum pname, GLint *params);
void glGenProgramPipelinesEXT(GLsizei n, GLuint *pipelines);
void glProgramUniformMatrix2x3fv(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glGetFragmentLightivSGIX(GLenum light, GLenum pname, GLint *params);
void glUnmapTexture2DINTEL(GLuint texture, GLint level);
void glVertexAttrib2svNV(GLuint index, const GLshort *v);
void glWindowPos2ivARB(const GLint *v);
void glGetVertexAttribPointervNV(GLuint index, GLenum pname, void **pointer);
void glPushMatrix();
void glGetVertexAttribivARB(GLuint index, GLenum pname, GLint *params);
void glGenerateMipmapEXT(GLenum target);
void glProgramUniformMatrix2x4dvEXT(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
void glPathCoordsNV(GLuint path, GLsizei numCoords, GLenum coordType, const void *coords);
void glProgramUniform1i(GLuint program, GLint location, GLint v0);
void glProgramUniformMatrix4x3dvEXT(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
void glProgramUniform1d(GLuint program, GLint location, GLdouble v0);
void glProgramUniform1f(GLuint program, GLint location, GLfloat v0);
void glProgramParameteriEXT(GLuint program, GLenum pname, GLint value);
void glCompressedMultiTexImage2DEXT(GLenum texunit, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *bits);
void glProgramUniform3iv(GLuint program, GLint location, GLsizei count, const GLint *value);
void glIndexiv(const GLint *c);
void glMultiTexCoord4xvOES(GLenum texture, const GLfixed *coords);
void glTransformFeedbackBufferRange(GLuint xfb, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size);
void glPixelZoom(GLfloat xfactor, GLfloat yfactor);
GLenum glVideoCaptureNV(GLuint video_capture_slot, GLuint *sequence_num, GLuint64EXT *capture_time);
void glVertex3bvOES(const GLbyte *coords);
void glFramebufferReadBufferEXT(GLuint framebuffer, GLenum mode);
void glExtractComponentEXT(GLuint res, GLuint src, GLuint num);
void glTexCoord2fColor3fVertex3fvSUN(const GLfloat *tc, const GLfloat *c, const GLfloat *v);
void glMinmax(GLenum target, GLenum internalformat, GLboolean sink);
void glColorP3ui(GLenum type, GLuint color);
void glPointParameterfvSGIS(GLenum pname, const GLfloat *params);
void glUseProgramObjectARB(GLhandleARB programObj);
void glFogCoorddEXT(GLdouble coord);
void glVertexAttrib4fvNV(GLuint index, const GLfloat *v);
void glFragmentLightiSGIX(GLenum light, GLenum pname, GLint param);
void glMultiTexCoord1bvOES(GLenum texture, const GLbyte *coords);
void glSecondaryColorPointerEXT(GLint size, GLenum type, GLsizei stride, const void *pointer);
void glMultiTexCoordP4uiv(GLenum texture, GLenum type, const GLuint *coords);
void glGetBufferSubDataARB(GLenum target, GLintptrARB offset, GLsizeiptrARB size, void *data);
void glGetPathParameterivNV(GLuint path, GLenum pname, GLint *value);
void glTextureView(GLuint texture, GLenum target, GLuint origtexture, GLenum internalformat, GLuint minlevel, GLuint numlevels, GLuint minlayer, GLuint numlayers);
void glDisableVertexArrayAttrib(GLuint vaobj, GLuint index);
void glElementPointerATI(GLenum type, const void *pointer);
void glGetAttachedObjectsARB(GLhandleARB containerObj, GLsizei maxCount, GLsizei *count, GLhandleARB *obj);
void glUniform4iv(GLint location, GLsizei count, const GLint *value);
void glFogxOES(GLenum pname, GLfixed param);
void glSharpenTexFuncSGIS(GLenum target, GLsizei n, const GLfloat *points);
void glClearDepthfOES(GLclampf depth);
void glDeleteCommandListsNV(GLsizei n, const GLuint *lists);
void glVertex4hNV(GLhalfNV x, GLhalfNV y, GLhalfNV z, GLhalfNV w);
void glSecondaryColor3dvEXT(const GLdouble *v);
void glGenTextures(GLsizei n, GLuint *textures);
void glTextureStorage2DMultisample(GLuint texture, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations);
void glUniform3uivEXT(GLint location, GLsizei count, const GLuint *value);
void glUniform3ui64vNV(GLint location, GLsizei count, const GLuint64EXT *value);
void glBlendFuncSeparateIndexedAMD(GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha);
void glMakeBufferResidentNV(GLenum target, GLenum access);
void glShaderOp2EXT(GLenum op, GLuint res, GLuint arg1, GLuint arg2);
void glActiveTextureARB(GLenum texture);
void glTexParameterIivEXT(GLenum target, GLenum pname, const GLint *params);
void glGetPerfQueryDataINTEL(GLuint queryHandle, GLuint flags, GLsizei dataSize, GLvoid *data, GLuint *bytesWritten);
void glNamedFramebufferTexture1DEXT(GLuint framebuffer, GLenum attachment, GLenum textarget, GLuint texture, GLint level);
void glTextureSubImage1DEXT(GLuint texture, GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLenum type, const void *pixels);
void glMultiTexCoord1s(GLenum target, GLshort s);
void glDispatchComputeGroupSizeARB(GLuint num_groups_x, GLuint num_groups_y, GLuint num_groups_z, GLuint group_size_x, GLuint group_size_y, GLuint group_size_z);
void glPathParameterfNV(GLuint path, GLenum pname, GLfloat value);
void glVertexAttrib2dvARB(GLuint index, const GLdouble *v);
void glVertexAttribPointer(GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer);
void glDepthBoundsdNV(GLdouble zmin, GLdouble zmax);
void glDeleteTexturesEXT(GLsizei n, const GLuint *textures);
void glDrawBuffersATI(GLsizei n, const GLenum *bufs);
void glLightModelxvOES(GLenum pname, const GLfixed *param);
void glApplyTextureEXT(GLenum mode);
void glVertexStream1dvATI(GLenum stream, const GLdouble *coords);
void glGetMinmax(GLenum target, GLboolean reset, GLenum format, GLenum type, void *values);
void glGetFixedvOES(GLenum pname, GLfixed *params);
void glSamplePatternEXT(GLenum pattern);
void glUniform1f(GLint location, GLfloat v0);
void glColor4fNormal3fVertex3fSUN(GLfloat r, GLfloat g, GLfloat b, GLfloat a, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z);
void glFogCoorddvEXT(const GLdouble *coord);
void glCopyTextureImage1DEXT(GLuint texture, GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLint border);
void glDeleteFencesNV(GLsizei n, const GLuint *fences);
void glProgramUniform1ivEXT(GLuint program, GLint location, GLsizei count, const GLint *value);
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import re
import sys
import shutil
import copy
import socket
import ruamel.yaml
import six
from ordereddict_backport import OrderedDict
import llnl.util.filesystem as fs
import llnl.util.tty as tty
from llnl.util.tty.color import colorize
import spack.error
import spack.hash_types as ht
import spack.repo
import spack.schema.env
import spack.spec
import spack.util.spack_json as sjson
import spack.config
from spack.filesystem_view import YamlFilesystemView
from spack.util.environment import EnvironmentModifications
import spack.architecture as architecture
from spack.spec import Spec
from spack.spec_list import SpecList, InvalidSpecConstraintError
from spack.variant import UnknownVariantError
#: environment variable used to indicate the active environment
spack_env_var = 'SPACK_ENV'
#: currently activated environment
_active_environment = None
#: path where environments are stored in the spack tree
env_path = os.path.join(spack.paths.var_path, 'environments')
#: Name of the input yaml file for an environment
manifest_name = 'spack.yaml'
#: Name of the lock file for an environment
lockfile_name = 'spack.lock'
#: Name of the directory where environments store repos, logs, views
env_subdir_name = '.spack-env'
#: default spack.yaml file to put in new environments
default_manifest_yaml = """\
# This is a Spack Environment file.
#
# It describes a set of packages to be installed, along with
# configuration settings.
spack:
# add package specs to the `specs` list
specs:
-
view: true
"""
#: regex for validating environment names
valid_environment_name_re = r'^\w[\w-]*$'
#: version of the lockfile format. Must increase monotonically.
lockfile_format_version = 2
#: legal first keys in the spack.yaml manifest file
env_schema_keys = ('spack', 'env')
# Magic names
# The name of the standalone spec list in the manifest yaml
user_speclist_name = 'specs'
# The name of the default view (the view loaded on env.activate)
default_view_name = 'default'
# Default behavior to link all packages into views (vs. only root packages)
default_view_link = 'all'
def valid_env_name(name):
return re.match(valid_environment_name_re, name)
def validate_env_name(name):
if not valid_env_name(name):
raise ValueError((
"'%s': names must start with a letter, and only contain "
"letters, numbers, _, and -.") % name)
return name
def activate(
env, use_env_repo=False, add_view=True, shell='sh', prompt=None
):
"""Activate an environment.
To activate an environment, we add its configuration scope to the
existing Spack configuration, and we set active to the current
environment.
Arguments:
env (Environment): the environment to activate
use_env_repo (bool): use the packages exactly as they appear in the
environment's repository
add_view (bool): generate commands to add view to path variables
shell (string): One of `sh`, `csh`.
prompt (string): string to add to the users prompt, or None
Returns:
cmds: Shell commands to activate environment.
TODO: environment to use the activated spack environment.
"""
global _active_environment
_active_environment = env
prepare_config_scope(_active_environment)
if use_env_repo:
spack.repo.path.put_first(_active_environment.repo)
tty.debug("Using environmennt '%s'" % _active_environment.name)
# Construct the commands to run
cmds = ''
if shell == 'csh':
# TODO: figure out how to make color work for csh
cmds += 'setenv SPACK_ENV %s;\n' % env.path
cmds += 'alias despacktivate "spack env deactivate";\n'
if prompt:
cmds += 'if (! $?SPACK_OLD_PROMPT ) '
cmds += 'setenv SPACK_OLD_PROMPT "${prompt}";\n'
cmds += 'set prompt="%s ${prompt}";\n' % prompt
else:
if os.getenv('TERM') and 'color' in os.getenv('TERM') and prompt:
prompt = colorize('@G{%s} ' % prompt, color=True)
cmds += 'export SPACK_ENV=%s;\n' % env.path
cmds += "alias despacktivate='spack env deactivate';\n"
if prompt:
cmds += 'if [ -z ${SPACK_OLD_PS1+x} ]; then\n'
cmds += ' if [ -z ${PS1+x} ]; then\n'
cmds += " PS1='$$$$';\n"
cmds += ' fi;\n'
cmds += ' export SPACK_OLD_PS1="${PS1}";\n'
cmds += 'fi;\n'
cmds += 'export PS1="%s ${PS1}";\n' % prompt
if add_view and default_view_name in env.views:
cmds += env.add_default_view_to_shell(shell)
return cmds
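# Illustrative usage sketch (not part of the original module): a small wrapper
# that looks up an environment by name and prints the shell commands produced
# by activate(). The environment name 'myenv' is hypothetical.
def _example_print_activation_commands(name='myenv'):
    """Print `sh`-style activation commands for the named environment."""
    env = read(name)                   # look up an existing named environment
    cmds = activate(env, shell='sh')   # build the export/alias commands
    print(cmds)                        # a shell wrapper would eval this output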
def deactivate(shell='sh'):
"""Undo any configuration or repo settings modified by ``activate()``.
Arguments:
shell (string): One of `sh`, `csh`. Shell style to use.
Returns:
(string): shell commands for `shell` to undo environment variables
"""
global _active_environment
if not _active_environment:
return
deactivate_config_scope(_active_environment)
# use _repo so we only remove if a repo was actually constructed
if _active_environment._repo:
spack.repo.path.remove(_active_environment._repo)
cmds = ''
if shell == 'csh':
cmds += 'unsetenv SPACK_ENV;\n'
cmds += 'if ( $?SPACK_OLD_PROMPT ) '
cmds += 'set prompt="$SPACK_OLD_PROMPT" && '
cmds += 'unsetenv SPACK_OLD_PROMPT;\n'
cmds += 'unalias despacktivate;\n'
else:
cmds += 'if [ ! -z ${SPACK_ENV+x} ]; then\n'
cmds += 'unset SPACK_ENV; export SPACK_ENV;\n'
cmds += 'fi;\n'
cmds += 'unalias despacktivate;\n'
cmds += 'if [ ! -z ${SPACK_OLD_PS1+x} ]; then\n'
cmds += ' if [ "$SPACK_OLD_PS1" = \'$$$$\' ]; then\n'
cmds += ' unset PS1; export PS1;\n'
cmds += ' else\n'
cmds += ' export PS1="$SPACK_OLD_PS1";\n'
cmds += ' fi;\n'
cmds += ' unset SPACK_OLD_PS1; export SPACK_OLD_PS1;\n'
cmds += 'fi;\n'
if default_view_name in _active_environment.views:
cmds += _active_environment.rm_default_view_from_shell(shell)
tty.debug("Deactivated environmennt '%s'" % _active_environment.name)
_active_environment = None
return cmds
def find_environment(args):
"""Find active environment from args, spack.yaml, or environment variable.
This is called in ``spack.main`` to figure out which environment to
activate.
Check for an environment in this order:
1. via ``spack -e ENV`` or ``spack -D DIR`` (arguments)
2. as a spack.yaml file in the current directory, or
3. via a path in the SPACK_ENV environment variable.
If an environment is found, read it in. If not, return None.
Arguments:
        args (Namespace): argparse namespace with command arguments
Returns:
(Environment): a found environment, or ``None``
"""
# try arguments
env = getattr(args, 'env', None)
# treat env as a name
if env:
if exists(env):
return read(env)
else:
            # if env was specified, see if it is a directory; otherwise, look
# at env_dir (env and env_dir are mutually exclusive)
env = getattr(args, 'env_dir', None)
# if no argument, look for a manifest file
if not env:
if os.path.exists(manifest_name):
env = os.getcwd()
# if no env, env_dir, or manifest try the environment
if not env:
env = os.environ.get(spack_env_var)
# nothing was set; there's no active environment
if not env:
return None
# if we get here, env isn't the name of a spack environment; it has
# to be a path to an environment, or there is something wrong.
if is_env_dir(env):
return Environment(env)
raise SpackEnvironmentError('no environment in %s' % env)
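# Illustrative sketch of the lookup order documented above: an explicit
# `spack -e NAME` argument wins over a spack.yaml in the working directory,
# which wins over the SPACK_ENV variable. The Namespace below mimics the
# parsed arguments; 'myenv' is a hypothetical environment name.
def _example_find_environment():
    import argparse
    args = argparse.Namespace(env='myenv', env_dir=None)
    return find_environment(args)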
def get_env(args, cmd_name, required=False):
"""Used by commands to get the active environment.
This first checks for an ``env`` argument, then looks at the
``active`` environment. We check args first because Spack's
subcommand arguments are parsed *after* the ``-e`` and ``-D``
arguments to ``spack``. So there may be an ``env`` argument that is
*not* the active environment, and we give it precedence.
This is used by a number of commands for determining whether there is
an active environment.
If an environment is not found *and* is required, print an error
message that says the calling command *needs* an active environment.
Arguments:
        args (Namespace): argparse namespace with command arguments
cmd_name (str): name of calling command
required (bool): if ``True``, raise an exception when no environment
is found; if ``False``, just return ``None``
Returns:
(Environment): if there is an arg or active environment
"""
# try argument first
env = getattr(args, 'env', None)
if env:
if exists(env):
return read(env)
elif is_env_dir(env):
return Environment(env)
else:
raise SpackEnvironmentError('no environment in %s' % env)
# try the active environment. This is set by find_environment() (above)
if _active_environment:
return _active_environment
elif not required:
return None
else:
tty.die(
'`spack %s` requires an environment' % cmd_name,
'activate an environment first:',
' spack env activate ENV',
'or use:',
' spack -e ENV %s ...' % cmd_name)
def _root(name):
"""Non-validating version of root(), to be used internally."""
return os.path.join(env_path, name)
def root(name):
"""Get the root directory for an environment by name."""
validate_env_name(name)
return _root(name)
def exists(name):
"""Whether an environment with this name exists or not."""
if not valid_env_name(name):
return False
return os.path.isdir(root(name))
def active(name):
"""True if the named environment is active."""
return _active_environment and name == _active_environment.name
def is_env_dir(path):
"""Whether a directory contains a spack environment."""
return os.path.isdir(path) and os.path.exists(
os.path.join(path, manifest_name))
def read(name):
"""Get an environment with the supplied name."""
validate_env_name(name)
if not exists(name):
raise SpackEnvironmentError("no such environment '%s'" % name)
return Environment(root(name))
def create(name, init_file=None, with_view=None):
"""Create a named environment in Spack."""
validate_env_name(name)
if exists(name):
raise SpackEnvironmentError("'%s': environment already exists" % name)
return Environment(root(name), init_file, with_view)
def config_dict(yaml_data):
"""Get the configuration scope section out of an spack.yaml"""
key = spack.config.first_existing(yaml_data, env_schema_keys)
return yaml_data[key]
def all_environment_names():
"""List the names of environments that currently exist."""
    # just return empty if the env path does not exist
dump_memory=False, quiesce=False):
"""Create a snapshot for a given machine"""
return self.connection.ex_create_snapshot(
node, snapshot_name, description,
dump_memory=dump_memory, quiesce=quiesce)
def _revert_machine_to_snapshot(self, machine, node,
snapshot_name=None):
"""Revert a given machine to a previous snapshot"""
return self.connection.ex_revert_to_snapshot(node,
snapshot_name)
def _remove_machine_snapshot(self, machine, node,
snapshot_name=None):
"""Removes a given machine snapshot"""
return self.connection.ex_remove_snapshot(node,
snapshot_name)
def _list_machine_snapshots(self, machine, node):
return self.connection.ex_list_snapshots(node)
def _list_images__fetch_images(self, search=None):
image_folders = []
if config.VSPHERE_IMAGE_FOLDERS:
image_folders = config.VSPHERE_IMAGE_FOLDERS
image_list = self.connection.list_images(folder_ids=image_folders)
# Check for templates without uuid
for image in image_list[:]:
if image.id is None:
log.error("Skipping machine {} on cloud {} - {}): uuid is "
"null".format(image.name,
self.cloud.title,
self.cloud.id))
image_list.remove(image)
return image_list
def _clone_machine(self, machine, node, name, resume):
locations = self.connection.list_locations()
node_location = None
if not machine.location:
vm = self.connection.find_by_uuid(node.id)
location_id = vm.summary.runtime.host.name
else:
location_id = machine.location.external_id
for location in locations:
if location.id == location_id:
node_location = location
break
folder = node.extra.get('folder', None)
        if not folder:
            try:
                # `vm` is only fetched above when the machine has no stored
                # location, so look it up again before reading its parent.
                vm = self.connection.find_by_uuid(node.id)
                folder = vm.parent._moId
            except Exception as exc:
                log.error(
                    "Clone Machine: Exception when "
                    "looking for folder: {}".format(exc))
                raise BadRequestError(
                    "Failed to find the folder containing the machine")
datastore = node.extra.get('datastore', None)
return self.connection.create_node(name=name, image=node,
size=node.size,
location=node_location,
ex_folder=folder,
ex_datastore=datastore)
def _get_libcloud_node(self, machine):
vm = self.connection.find_by_uuid(machine.machine_id)
return self.connection._to_node_recursive(vm)
def _generate_plan__parse_networks(self, auth_context, networks_dict,
location):
try:
network_search = networks_dict['network']
except KeyError:
return None
from mist.api.methods import list_resources
try:
[network], _ = list_resources(auth_context,
'network',
search=network_search,
cloud=self.cloud.id,
limit=1)
except ValueError:
raise BadRequestError(f'Network: {network_search} not found')
return {
'id': network.id,
'name': network.name,
'external_id': network.network_id,
}
def _generate_plan__parse_extra(self, extra, plan):
folder = extra.get('folder')
if folder:
folders = self.connection.ex_list_folders()
folder_dict = next((item for item in folders
if (item.get('id') == folder or
item.get('name') == folder)),
None)
if folder_dict is None:
raise NotFoundError(
f'Folder: {folder} not found')
folder_features = folder_dict.get('type') or []
if 'VirtualMachine' not in folder_features:
raise BadRequestError(
f'Folder: {folder} does not support machine provisioning'
)
plan['folder'] = {
'id': folder_dict['id'],
'name': folder_dict['name'],
}
datastore = extra.get('datastore')
if datastore:
datastores = self.connection.ex_list_datastores()
datastore_dict = next((item for item in datastores
if (item.get('id') == datastore or
item.get('name') == datastore)),
None)
if datastore_dict is None:
raise NotFoundError(
f'Datastore: {datastore} not found'
)
plan['datastore'] = {
'id': datastore_dict['id'],
'name': datastore_dict['name'],
}
def _create_machine__get_size_object(self, size):
# even though vsphere has custom sizes `create_node`
# expects a libcloud NodeSize object. Create a dummy
# one with only the attributes necessary
from libcloud.compute.base import NodeSize
nodesize = NodeSize(id=None,
name=None,
ram=size['ram'],
disk=None,
bandwidth=None,
price=None,
driver=self.connection,
extra={
'cpus': size['cpus']
})
return nodesize
def _create_machine__compute_kwargs(self, plan):
kwargs = super()._create_machine__compute_kwargs(plan)
try:
kwargs['ex_network'] = plan['networks']['external_id']
except KeyError:
kwargs['ex_network'] = None
try:
kwargs['ex_folder'] = plan['folder']['id']
except KeyError:
kwargs['ex_folder'] = None
try:
kwargs['ex_datastore'] = plan['datastore']['id']
except KeyError:
kwargs['ex_datastore'] = None
return kwargs
class OpenStackComputeController(BaseComputeController):
def _connect(self, **kwargs):
url = dnat(self.cloud.owner, self.cloud.url)
return get_driver(Provider.OPENSTACK)(
self.cloud.username,
self.cloud.password,
api_version='2.2',
ex_force_auth_version='3.x_password',
ex_tenant_name=self.cloud.tenant,
ex_force_service_region=self.cloud.region,
ex_force_base_url=self.cloud.compute_endpoint,
ex_auth_url=url,
ex_domain_name=self.cloud.domain or 'Default'
)
def _list_machines__machine_creation_date(self, machine, node_dict):
return node_dict['extra'].get('created') # iso8601 string
def _list_machines__machine_actions(self, machine, node_dict):
super(OpenStackComputeController,
self)._list_machines__machine_actions(machine, node_dict)
machine.actions.rename = True
machine.actions.resize = True
def _resize_machine(self, machine, node, node_size, kwargs):
try:
self.connection.ex_resize(node, node_size)
except Exception as exc:
raise BadRequestError('Failed to resize node: %s' % exc)
try:
sleep(50)
node = self._get_libcloud_node(machine)
return self.connection.ex_confirm_resize(node)
except Exception as exc:
sleep(50)
node = self._get_libcloud_node(machine)
try:
return self.connection.ex_confirm_resize(node)
except Exception as exc:
raise BadRequestError('Failed to resize node: %s' % exc)
def _list_machines__postparse_machine(self, machine, node_dict):
updated = False
# do not include ipv6 on public ips
public_ips = []
for ip in machine.public_ips:
if ip and ':' not in ip:
public_ips.append(ip)
if machine.public_ips != public_ips:
machine.public_ips = public_ips
updated = True
return updated
def _list_machines__get_location(self, node):
return node['extra'].get('availability_zone', '')
def _list_sizes__get_cpu(self, size):
return size.vcpus
def _list_machines__get_size(self, node):
return node['extra'].get('flavorId')
def _list_security_groups(self):
if self.cloud.tenant_id is None:
# try to populate tenant_id field
try:
tenant_id = \
self.cloud.ctl.compute.connection.ex_get_tenant_id()
except Exception as exc:
log.error(
'Failed to retrieve project id for Openstack cloud %s: %r',
self.cloud.id, exc)
else:
self.cloud.tenant_id = tenant_id
try:
self.cloud.save()
except me.ValidationError as exc:
log.error(
'Error adding tenant_id to %s: %r',
self.cloud.title, exc)
try:
sec_groups = \
self.cloud.ctl.compute.connection.ex_list_security_groups(
tenant_id=self.cloud.tenant_id
)
except Exception as exc:
log.error('Could not list security groups for cloud %s: %r',
self.cloud, exc)
raise CloudUnavailableError(exc=exc)
sec_groups = [{'id': sec_group.id,
'name': sec_group.name,
'tenant_id': sec_group.tenant_id,
'description': sec_group.description,
}
for sec_group in sec_groups]
return sec_groups
def _list_locations__fetch_locations(self):
return self.connection.ex_list_availability_zones()
def _generate_plan__parse_location(self, auth_context, location_search):
# If a location string is not given, let openstack set
# the default location
if not location_search:
from mist.api.clouds.models import CloudLocation
return [CloudLocation()]
locations = super()._generate_plan__parse_location(
auth_context, location_search)
        # filter out locations that do not support compute resources
return [location for location in locations
if location.extra.get('compute', False) is True]
def _generate_plan__parse_networks(self, auth_context, networks_dict,
location):
from mist.api.methods import list_resources
from mist.api.networks.models import Network
ret_dict = {}
ret_dict['associate_floating_ip'] = True if networks_dict.get(
'associate_floating_ip', True) is True else False
networks = networks_dict.get('networks', [])
ret_dict['networks'] = []
# if multiple networks exist, network parameter must be defined
if (len(networks) == 0 and Network.objects(cloud=self.cloud, missing_since=None).count() > 1): # noqa
raise BadRequestError('Multiple networks found, define a network to be more specific.') # noqa
for net in networks:
try:
[network], _ = list_resources(auth_context, 'network',
search=net,
cloud=self.cloud.id,
limit=1)
except ValueError:
raise NotFoundError(f'Network {net} does not exist')
ret_dict['networks'].append({'id': network.network_id,
'name': network.name})
try:
security_groups = set(networks_dict.get('security_groups', []))
except TypeError:
raise BadRequestError('Invalid type for security groups')
ret_dict['security_groups'] = []
if security_groups:
try:
sec_groups = \
self.cloud.ctl.compute.connection.ex_list_security_groups(
tenant_id=self.cloud.tenant_id
)
except Exception as exc:
log.exception('Could not list security groups for cloud %s',
self.cloud)
raise CloudUnavailableError(exc=exc) from None
ret_dict['security_groups'] = list({
sec_group.name for sec_group in sec_groups
if sec_group.name in security_groups or
sec_group.id in security_groups})
return ret_dict
def _generate_plan__parse_volume_attrs(self, volume_dict, vol_obj):
delete_on_termination = True if volume_dict.get(
'delete_on_termination', False) is True else False
boot = True if volume_dict.get(
'boot', False) is True else False
return {
'id': vol_obj.id,
'name': vol_obj.name,
'delete_on_termination': delete_on_termination,
'boot': boot,
}
def _generate_plan__parse_custom_volume(self, volume_dict):
try:
size = int(volume_dict['size'])
except KeyError:
raise BadRequestError('Volume size is required')
except (TypeError, ValueError):
raise BadRequestError('Invalid volume size type')
delete_on_termination = True if volume_dict.get(
'delete_on_termination', False) is True else False
boot = True if volume_dict.get(
'boot', False) is True else False
return {
'size': size,
'delete_on_termination': delete_on_termination,
'boot': boot,
}
def _generate_plan__post_parse_plan(self, plan):
volumes = plan.get('volumes', [])
# make sure boot drive is first if it exists
volumes.sort(key=lambda k: k['boot'],
reverse=True)
if len(volumes) > 1:
# make sure only one boot volume is set
if volumes[1].get('boot') is True:
raise BadRequestError('Up to 1 volume must be set as boot')
plan['volumes'] = volumes
def _create_machine__compute_kwargs(self, plan):
from libcloud.compute.drivers.openstack import OpenStackSecurityGroup
from libcloud.compute.drivers.openstack import OpenStackNetwork
kwargs = super()._create_machine__compute_kwargs(plan)
if kwargs.get('location'):
kwargs['ex_availability_zone'] = kwargs.pop('location').name
if plan.get('cloudinit'):
kwargs['ex_userdata'] = plan['cloudinit']
key = kwargs.pop('auth')
try:
openstack_keys = self.connection.list_key_pairs()
except Exception as exc:
log.exception('Failed to fetch keypairs')
raise
for openstack_key in openstack_keys:
if key.public == openstack_key.public_key:
server_key = openstack_key
break
else:
try:
                server_key = self.connection.ex_import_keypair_from_string(
                    name=f'mistio-{secrets.token_hex(3)}',
                    key_material=key.public)  # the import needs the public key material
except Exception:
log.exception('Failed to create keypair')
raise
kwargs['ex_keyname'] = server_key.name
# use dummy objects with only the attributes needed
kwargs['networks'] = [OpenStackNetwork(network['id'],
None,
None,
self.connection)
for network in plan['networks']['networks']]
kwargs['ex_security_groups'] = [
OpenStackSecurityGroup(id=None,
name=sec_group,
tenant_id=None,
description=None,
driver=self.connection)
for sec_group in plan['networks']['security_groups']
]
blockdevicemappings = []
for volume in plan['volumes']:
mapping = {
'delete_on_termination': volume['delete_on_termination'],
'destination_type': 'volume',
}
if volume.get('id'):
from mist.api.volumes.models import Volume
vol = Volume.objects.get(id=volume['id'])
if volume['boot'] is True:
mapping['boot_index'] = 0
else:
mapping['boot_index'] = None
mapping['uuid'] = vol.external_id
mapping['source_type'] = 'volume'
else:
mapping['volume_size'] = volume['size']
if volume['boot'] is True:
mapping['boot_index'] = 0
mapping['source_type'] = 'image'
mapping['uuid'] = kwargs.pop('image').id
else:
mapping['boot_index'] = None
mapping['source_type'] = 'blank'
blockdevicemappings.append(mapping)
# This is a workaround for an issue which occurs only
# when non-boot volumes are passed. Openstack expects a
# block device mapping with boot_index 0.
# http://lists.openstack.org/pipermail/openstack-dev/2015-March/059332.html # noqa
if (blockdevicemappings and
blockdevicemappings[0]['boot_index'] is None):
blockdevicemappings.insert(0, {'uuid': kwargs.pop('image').id,
'source_type': 'image',
'destination_type': 'local',
'boot_index': 0,
'delete_on_termination': True})
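        # Example (illustrative): a single non-boot 10GB blank volume ends up as
        #   [{'uuid': <image id>, 'source_type': 'image', 'destination_type': 'local',
        #     'boot_index': 0, 'delete_on_termination': True},
        #    {'volume_size': 10, 'source_type': 'blank', 'destination_type': 'volume',
        #     'boot_index': None, 'delete_on_termination': False}]
        # so Nova always sees a boot_index-0 image mapping first.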
kwargs['ex_blockdevicemappings'] = blockdevicemappings
return kwargs
def _create_machine__post_machine_creation_steps(self, node, kwargs, plan):
if plan['networks']['associate_floating_ip'] is False:
return
# From the already created floating ips try to find one
# that is not associated to a node
floating_ips = self.connection.ex_list_floating_ips()
unassociated_floating_ip = next((ip for ip in floating_ips
if ip.status | |
# Repository: simonpf/gprof_nn
"""
==================
gprof_nn.bin.train
==================
This sub-module implements the 'train' sub-command of the
'gprof_nn' command line application, which trains
networks for the GPROF-NN retrieval algorithm.
"""
import argparse
import logging
from concurrent.futures import ProcessPoolExecutor
from pathlib import Path
from gprof_nn import sensors, statistics
import gprof_nn.logging
from gprof_nn.retrieval import RetrievalDriver, RetrievalGradientDriver
from gprof_nn.definitions import (
ALL_TARGETS,
PROFILE_NAMES,
CONFIGURATIONS,
GPROF_NN_DATA_PATH,
)
from gprof_nn.data.training_data import (
GPROF_NN_1D_Dataset,
GPROF_NN_3D_Dataset,
SimulatorDataset,
)
from gprof_nn.models import (
GPROF_NN_1D_DRNN,
GPROF_NN_1D_QRNN,
GPROF_NN_3D_DRNN,
GPROF_NN_3D_QRNN,
Simulator,
)
LOGGER = logging.getLogger(__name__)
def add_parser(subparsers):
"""
Add parser for 'train' command to top-level parser. This function
is called from the top-level parser defined in 'gprof_nn.bin'.
Args:
subparsers: The subparsers object provided by the top-level parser.
"""
parser = subparsers.add_parser(
"train",
description=(
"""
Trains a GPROF-NN 1D or 3D network.
"""
),
)
# Input and output data
parser.add_argument(
"variant",
metavar="kind",
type=str,
help="The type of GPROF-NN model to train: '1D' or '3D' or 'SIM'",
)
parser.add_argument(
"sensor",
metavar="sensor",
type=str,
help="Name of the sensor for which to train the algorithm.",
)
parser.add_argument(
"configuration",
metavar="[ERA5/GANAL]",
type=str,
help="The configuration for which the model is trained.",
)
parser.add_argument(
"training_data",
metavar="training_data",
type=str,
help="Path to training data.",
)
parser.add_argument(
"validation_data",
metavar="validation_data",
type=str,
help="Path to validation data.",
)
parser.add_argument(
"output", metavar="output", type=str, nargs=1, help="Where to store the model."
)
# Model configuration
parser.add_argument(
"--type",
metavar="network_type",
type=str,
nargs=1,
help="The type of network: drnn, qrnn or qrnn_exp",
default="qrnn_exp",
)
parser.add_argument(
"--n_layers_body",
metavar="n",
type=int,
default=6,
help=(
"For GPROF-NN 1D: The number of hidden layers in the shared body"
" of the network."
),
)
parser.add_argument(
"--n_neurons_body",
metavar="n",
type=int,
default=256,
help=("For GPROF-NN 1D and 3D: The number of neurons in the body."),
)
parser.add_argument(
"--n_layers_head",
metavar="n",
type=int,
default=2,
help="For GPROF-NN 1D: How many layers in the heads of the model.",
)
parser.add_argument(
"--n_neurons_head",
metavar="n",
type=int,
default=128,
help=("For GPROF-NN 1D and 3D: How many neurons in each head of the " "model."),
)
parser.add_argument(
"--n_blocks",
metavar="N",
type=int,
nargs="+",
default=[2],
help=(
"For GPROF-NN 3D: The number of Xception block per "
" downsampling stage of the model."
),
)
parser.add_argument(
"--activation",
metavar="activation",
type=str,
nargs=1,
default="ReLU",
help="For GPROF-NN 1D: The activation function.",
)
parser.add_argument(
"--residuals",
metavar="residuals",
type=str,
nargs=1,
default="simple",
help="For GPROF-NN 1D: The type of residual connections to apply.",
)
parser.add_argument(
"--n_epochs",
metavar="n",
type=int,
nargs="*",
default=[20, 20, 20],
help=(
"For how many epochs to train the network. When multiple values "
"are given the network is trained multiple times (warm restart)."
),
)
parser.add_argument(
"--learning_rate",
metavar="lr",
type=float,
nargs="*",
default=[0.0005, 0.0005, 0.0001],
help="The learning rates to use during training.",
)
parser.add_argument(
"--no_lr_schedule", action="store_true", help="Disable learning rate schedule."
)
parser.add_argument(
"--no_ancillary",
action="store_false",
help="Don't use acillary data in retrieval.",
)
parser.add_argument(
"--no_validation",
action="store_true",
help="Disable performance monitoring a validation set",
)
# Other
parser.add_argument(
"--device",
metavar="device",
type=str,
help="The name of the device on which to run the training",
)
parser.add_argument(
"--targets",
metavar="target_1 target_2",
type=str,
nargs="+",
help="The target on which to train the network",
)
parser.add_argument(
"--batch_size",
metavar="n",
type=int,
help="The batch size to use for training.",
default=8,
)
parser.add_argument(
"--permute",
metavar="feature_index",
type=int,
help=(
"If provided, the input feature with the given index " "will be permuted."
),
)
parser.set_defaults(func=run)
def run(args):
"""
Run the training.
Args:
args: The namespace object provided by the top-level parser.
"""
sensor = args.sensor
configuration = args.configuration
    training_data = args.training_data
    validation_data = args.validation_data
#
# Determine sensor
#
sensor = sensor.strip().upper()
sensor = getattr(sensors, sensor, None)
if sensor is None:
LOGGER.error("Sensor '%s' is not supported.", args.sensor.strip().upper())
return 1
variant = args.variant
if variant.upper() not in ["1D", "3D", "SIM"]:
LOGGER.error("'variant' should be one of ['1D', '3D', 'SIM']")
return 1
#
# Configuration
#
if configuration.upper() not in CONFIGURATIONS:
LOGGER.error("'configuration' should be one of $s.", CONFIGURATIONS)
return 1
# Check output path and define model name if necessary.
output = Path(args.output[0])
if output.is_dir() and not output.exists():
LOGGER.error("The output path '%s' doesn't exist.", output)
return 1
if not output.is_dir() and not output.parent.exists():
LOGGER.error("The output path '%s' doesn't exist.", output.parent)
return 1
if output.is_dir():
network_name = (
f"gprof_nn_{variant.lower()}_{sensor.name.lower()}_"
f"{configuration.lower()}.pckl"
)
output = output / network_name
training_data = args.training_data
validation_data = args.validation_data
if variant.upper() == "1D":
run_training_1d(
sensor, configuration, training_data, validation_data, output, args
)
elif variant.upper() == "3D":
run_training_3d(
sensor, configuration, training_data, validation_data, output, args
)
elif variant.upper() == "SIM":
run_training_sim(
sensor, configuration, training_data, validation_data, output, args
)
else:
raise ValueError("'variant' should be one of '1D', '3D', 'SIM'.")
def run_training_1d(
sensor, configuration, training_data, validation_data, output, args
):
"""
Run training for GPROF-NN 1D algorithm.
Args:
sensor: Sensor object representing the sensor for which to train
an algorithm.
configuration: String identifying the retrieval configuration.
training_data: The path to the training data.
validation_data: The path to the validation data.
output: Path to which to write the resulting model.
args: Namespace with the remaining command line arguments.
"""
from quantnn.qrnn import QRNN
from quantnn.normalizer import Normalizer
from quantnn.data import DataFolder
from quantnn.transformations import LogLinear
from quantnn.models.pytorch.logging import TensorBoardLogger
from quantnn.metrics import ScatterPlot
import torch
from torch import optim
torch.multiprocessing.set_sharing_strategy("file_system")
torch.set_num_threads(1)
n_layers_body = args.n_layers_body
n_neurons_body = args.n_neurons_body
n_layers_head = args.n_layers_head
n_neurons_head = args.n_neurons_head
activation = args.activation[0]
residuals = args.residuals[0]
device = args.device
targets = args.targets
network_type = args.type[0]
batch_size = args.batch_size
permute = args.permute
ancillary = args.no_ancillary
n_epochs = args.n_epochs
lr = args.learning_rate
no_schedule = args.no_lr_schedule
if len(n_epochs) == 1:
n_epochs = n_epochs * len(lr)
if len(lr) == 1:
lr = lr * len(n_epochs)
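    # Example (illustrative): --n_epochs 20 --learning_rate 5e-4 1e-4 expands
    # n_epochs to [20, 20], so the warm-restart loop below runs the pairs
    # (20, 5e-4) and (20, 1e-4).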
#
# Load training data.
#
dataset_factory = GPROF_NN_1D_Dataset
normalizer_path = GPROF_NN_DATA_PATH / f"normalizer_{sensor.name.lower()}.pckl"
normalizer = Normalizer.load(normalizer_path)
kwargs = {
"sensor": sensor,
"batch_size": batch_size,
"normalizer": normalizer,
"targets": targets,
"augment": True,
"permute": permute,
}
training_data = DataFolder(
training_data, dataset_factory, kwargs=kwargs, queue_size=64, n_workers=6
)
if args.no_validation:
validation_data = None
else:
kwargs = {
"sensor": sensor,
"batch_size": 4 * batch_size,
"normalizer": normalizer,
"targets": targets,
"augment": False,
"permute": permute,
}
validation_data = DataFolder(
validation_data, dataset_factory, kwargs=kwargs, queue_size=64, n_workers=2
)
#
# Create neural network model
#
if Path(output).exists():
try:
xrnn = QRNN.load(output)
LOGGER.info(f"Continuing training of existing model {output}.")
except Exception:
xrnn = None
else:
xrnn = None
if xrnn is None:
LOGGER.info(f"Creating new model of type {network_type}.")
if network_type == "drnn":
xrnn = GPROF_NN_1D_DRNN(
sensor,
n_layers_body,
n_neurons_body,
n_layers_head,
n_neurons_head,
activation=activation,
residuals=residuals,
targets=targets,
ancillary=ancillary,
)
elif network_type == "qrnn_exp":
transformation = {t: LogLinear() for t in targets}
transformation["latent_heat"] = None
xrnn = GPROF_NN_1D_QRNN(
sensor,
n_layers_body,
n_neurons_body,
n_layers_head,
n_neurons_head,
activation=activation,
residuals=residuals,
transformation=transformation,
targets=targets,
ancillary=ancillary,
)
else:
xrnn = GPROF_NN_1D_QRNN(
sensor,
n_layers_body,
n_neurons_body,
n_layers_head,
n_neurons_head,
activation=activation,
residuals=residuals,
targets=targets,
ancillary=ancillary,
)
model = xrnn.model
xrnn.normalizer = normalizer
xrnn.configuration = configuration
xrnn.sensor = sensor.full_name
###############################################################################
# Run the training.
###############################################################################
n_epochs_tot = sum(n_epochs)
logger = TensorBoardLogger(n_epochs_tot)
logger.set_attributes(
{
"sensor": sensor.name,
"configuration": configuration,
"n_layers_body": n_layers_body,
"n_neurons_body": n_neurons_body,
"n_layers_head": n_layers_head,
"n_neurons_head": n_neurons_head,
"activation": activation,
"residuals": residuals,
"targets": ", ".join(targets),
"type": network_type,
"optimizer": "adam",
}
)
metrics = ["MeanSquaredError", "Bias", "CalibrationPlot", "CRPS"]
scatter_plot = ScatterPlot(log_scale=True)
metrics.append(scatter_plot)
for n, r in zip(n_epochs, lr):
LOGGER.info(f"Starting training for {n} epochs with learning rate {r}")
optimizer = optim.Adam(model.parameters(), lr=r)
if no_schedule:
scheduler = None
else:
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, n)
xrnn.train(
training_data=training_data,
validation_data=validation_data,
n_epochs=n,
optimizer=optimizer,
scheduler=scheduler,
logger=logger,
metrics=metrics,
device=device,
mask=-9999,
)
LOGGER.info(f"Saving training network to {output}.")
xrnn.save(output)
def run_training_3d(
sensor, configuration, training_data, validation_data, output, args
):
"""
Run training for GPROF-NN 3D algorithm.
Args:
sensor: Sensor object representing the sensor for which to train
an algorithm.
configuration: String identifying the retrieval configuration.
training_data: The path to the training data.
validation_data: The path to the validation data.
output: Path to which to write the resulting model.
args: Namespace with the remaining command line arguments.
"""
from quantnn.qrnn import QRNN
from quantnn.normalizer import Normalizer
from quantnn.data import DataFolder
from quantnn.transformations import LogLinear
from quantnn.models.pytorch.logging import TensorBoardLogger
from quantnn.metrics import ScatterPlot
import torch
from torch import optim
torch.multiprocessing.set_sharing_strategy("file_system")
torch.set_num_threads(1)
n_blocks = args.n_blocks[0]
n_neurons_body = args.n_neurons_body
n_layers_head = args.n_layers_head
n_neurons_head = args.n_neurons_head
device = args.device
targets = args.targets
network_type = args.type[0]
batch_size = args.batch_size
n_epochs = args.n_epochs
lr = args.learning_rate
no_schedule = args.no_lr_schedule
ancillary = args.no_ancillary
if len(n_epochs) == 1:
n_epochs = n_epochs * len(lr)
if len(lr) == 1:
lr = lr * len(n_epochs)
#
# Load training data.
#
    dataset_factory = GPROF_NN_3D_Dataset
# Repository: sailxjx/DI-engine
"""
This implementation of ResNet is a slightly modified version of `https://github.com/rwightman/pytorch-image-models.git`
"""
from typing import List
import math
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from .nn_module import Flatten
def to_2tuple(item):
if np.isscalar(item):
return (item, item)
else:
return item
# Calculate asymmetric TensorFlow-like 'SAME' padding for a convolution
def get_same_padding(x: int, k: int, s: int, d: int):
return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0)
# Dynamically pad input x with 'SAME' padding for conv with specified args
def pad_same(x, k: List[int], s: List[int], d: List[int] = (1, 1), value: float = 0):
ih, iw = x.size()[-2:]
pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(iw, k[1], s[1], d[1])
if pad_h > 0 or pad_w > 0:
x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2], value=value)
return x
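# Worked example (illustrative, not used elsewhere in this module): for a
# 6-pixel dimension with a 3x3 kernel, stride 2 and dilation 1,
# get_same_padding() returns 1, which pad_same() splits asymmetrically as
# (0 before, 1 after), matching TensorFlow's 'SAME' behaviour.
def _example_same_padding(size=6, k=3, s=2, d=1):
    total = get_same_padding(size, k, s, d)   # -> 1 for the defaults above
    return total // 2, total - total // 2     # -> (0, 1)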
def avg_pool2d_same(
x,
kernel_size: List[int],
stride: List[int],
padding: List[int] = (0, 0),
ceil_mode: bool = False,
count_include_pad: bool = True
):
# FIXME how to deal with count_include_pad vs not for external padding?
x = pad_same(x, kernel_size, stride)
return F.avg_pool2d(x, kernel_size, stride, (0, 0), ceil_mode, count_include_pad)
class AvgPool2dSame(nn.AvgPool2d):
""" Tensorflow like 'SAME' wrapper for 2D average pooling
"""
def __init__(self, kernel_size: int, stride=None, padding=0, ceil_mode=False, count_include_pad=True):
kernel_size = to_2tuple(kernel_size)
stride = to_2tuple(stride)
super(AvgPool2dSame, self).__init__(kernel_size, stride, (0, 0), ceil_mode, count_include_pad)
def forward(self, x):
x = pad_same(x, self.kernel_size, self.stride)
return F.avg_pool2d(x, self.kernel_size, self.stride, self.padding, self.ceil_mode, self.count_include_pad)
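# Quick usage sketch (illustrative): with 'SAME' pooling the output spatial
# size is ceil(input / stride), e.g. 7x7 -> 4x4 at stride 2, whereas the stock
# nn.AvgPool2d(3, 2) would give 3x3. torch is imported lazily so it is only
# needed when the sketch is called.
def _example_avg_pool2d_same():
    import torch
    x = torch.randn(1, 8, 7, 7)
    return AvgPool2dSame(kernel_size=3, stride=2)(x).shape  # torch.Size([1, 8, 4, 4])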
def _create_pool(num_features, num_classes, pool_type='avg', use_conv=False):
flatten_in_pool = not use_conv # flatten when we use a Linear layer after pooling
if not pool_type:
assert num_classes == 0 or use_conv,\
'Pooling can only be disabled if classifier is also removed or conv classifier is used'
flatten_in_pool = False # disable flattening if pooling is pass-through (no pooling)
assert flatten_in_pool
global_pool = nn.AdaptiveAvgPool2d(1)
num_pooled_features = num_features * 1
return global_pool, num_pooled_features
def _create_fc(num_features, num_classes, use_conv=False):
if num_classes <= 0:
fc = nn.Identity() # pass-through (no classifier)
elif use_conv:
fc = nn.Conv2d(num_features, num_classes, 1, bias=True)
else:
# use nn.Linear for simplification
fc = nn.Linear(num_features, num_classes, bias=True)
return fc
def create_classifier(num_features, num_classes, pool_type='avg', use_conv=False):
assert pool_type == 'avg'
global_pool, num_pooled_features = _create_pool(num_features, num_classes, pool_type, use_conv=use_conv)
fc = _create_fc(num_pooled_features, num_classes, use_conv=use_conv)
return global_pool, fc
class ClassifierHead(nn.Module):
"""Classifier head w/ configurable global pooling and dropout."""
def __init__(self, in_chs, num_classes, pool_type='avg', drop_rate=0., use_conv=False):
super(ClassifierHead, self).__init__()
self.drop_rate = drop_rate
self.global_pool, num_pooled_features = _create_pool(in_chs, num_classes, pool_type, use_conv=use_conv)
self.fc = _create_fc(num_pooled_features, num_classes, use_conv=use_conv)
self.flatten = Flatten(1) if use_conv and pool_type else nn.Identity()
def forward(self, x):
x = self.global_pool(x)
if self.drop_rate:
x = F.dropout(x, p=float(self.drop_rate), training=self.training)
x = self.fc(x)
x = self.flatten(x)
return x
def create_attn(layer, plane):
return None
def get_padding(kernel_size, stride, dilation=1):
padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
return padding
class BasicBlock(nn.Module):
expansion = 1
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
cardinality=1,
base_width=64,
reduce_first=1,
dilation=1,
first_dilation=None,
act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d,
attn_layer=None,
aa_layer=None,
drop_block=None,
drop_path=None
):
super(BasicBlock, self).__init__()
assert cardinality == 1, 'BasicBlock only supports cardinality of 1'
assert base_width == 64, 'BasicBlock does not support changing base width'
first_planes = planes // reduce_first
outplanes = planes * self.expansion
first_dilation = first_dilation or dilation
use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation)
self.conv1 = nn.Conv2d(
inplanes,
first_planes,
kernel_size=3,
stride=1 if use_aa else stride,
padding=first_dilation,
dilation=first_dilation,
bias=False
)
self.bn1 = norm_layer(first_planes)
self.act1 = act_layer(inplace=True)
self.aa = aa_layer(channels=first_planes, stride=stride) if use_aa else None
self.conv2 = nn.Conv2d(first_planes, outplanes, kernel_size=3, padding=dilation, dilation=dilation, bias=False)
self.bn2 = norm_layer(outplanes)
self.se = create_attn(attn_layer, outplanes)
self.act2 = act_layer(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.drop_block = drop_block
self.drop_path = drop_path
def zero_init_last_bn(self):
nn.init.zeros_(self.bn2.weight)
def forward(self, x):
shortcut = x
x = self.conv1(x)
x = self.bn1(x)
if self.drop_block is not None:
x = self.drop_block(x)
x = self.act1(x)
if self.aa is not None:
x = self.aa(x)
x = self.conv2(x)
x = self.bn2(x)
if self.drop_block is not None:
x = self.drop_block(x)
if self.se is not None:
x = self.se(x)
if self.drop_path is not None:
x = self.drop_path(x)
if self.downsample is not None:
shortcut = self.downsample(shortcut)
x += shortcut
x = self.act2(x)
return x
class Bottleneck(nn.Module):
expansion = 4
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
cardinality=1,
base_width=64,
reduce_first=1,
dilation=1,
first_dilation=None,
act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d,
attn_layer=None,
aa_layer=None,
drop_block=None,
drop_path=None
):
super(Bottleneck, self).__init__()
width = int(math.floor(planes * (base_width / 64)) * cardinality)
first_planes = width // reduce_first
outplanes = planes * self.expansion
first_dilation = first_dilation or dilation
use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation)
self.conv1 = nn.Conv2d(inplanes, first_planes, kernel_size=1, bias=False)
self.bn1 = norm_layer(first_planes)
self.act1 = act_layer(inplace=True)
self.conv2 = nn.Conv2d(
first_planes,
width,
kernel_size=3,
stride=1 if use_aa else stride,
padding=first_dilation,
dilation=first_dilation,
groups=cardinality,
bias=False
)
self.bn2 = norm_layer(width)
self.act2 = act_layer(inplace=True)
self.aa = aa_layer(channels=width, stride=stride) if use_aa else None
self.conv3 = nn.Conv2d(width, outplanes, kernel_size=1, bias=False)
self.bn3 = norm_layer(outplanes)
self.se = create_attn(attn_layer, outplanes)
self.act3 = act_layer(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.drop_block = drop_block
self.drop_path = drop_path
def zero_init_last_bn(self):
nn.init.zeros_(self.bn3.weight)
def forward(self, x):
shortcut = x
x = self.conv1(x)
x = self.bn1(x)
if self.drop_block is not None:
x = self.drop_block(x)
x = self.act1(x)
x = self.conv2(x)
x = self.bn2(x)
if self.drop_block is not None:
x = self.drop_block(x)
x = self.act2(x)
if self.aa is not None:
x = self.aa(x)
x = self.conv3(x)
x = self.bn3(x)
if self.drop_block is not None:
x = self.drop_block(x)
if self.se is not None:
x = self.se(x)
if self.drop_path is not None:
x = self.drop_path(x)
if self.downsample is not None:
shortcut = self.downsample(shortcut)
x += shortcut
x = self.act3(x)
return x
def downsample_conv(in_channels, out_channels, kernel_size, stride=1, dilation=1, first_dilation=None, norm_layer=None):
norm_layer = norm_layer or nn.BatchNorm2d
kernel_size = 1 if stride == 1 and dilation == 1 else kernel_size
first_dilation = (first_dilation or dilation) if kernel_size > 1 else 1
p = get_padding(kernel_size, stride, first_dilation)
return nn.Sequential(
*[
nn.Conv2d(
in_channels, out_channels, kernel_size, stride=stride, padding=p, dilation=first_dilation, bias=False
),
norm_layer(out_channels)
]
)
def downsample_avg(in_channels, out_channels, kernel_size, stride=1, dilation=1, first_dilation=None, norm_layer=None):
norm_layer = norm_layer or nn.BatchNorm2d
avg_stride = stride if dilation == 1 else 1
if stride == 1 and dilation == 1:
pool = nn.Identity()
else:
avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d
pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)
return nn.Sequential(
*[pool,
nn.Conv2d(in_channels, out_channels, 1, stride=1, padding=0, bias=False),
norm_layer(out_channels)]
)
def drop_blocks(drop_block_rate=0.):
assert drop_block_rate == 0., drop_block_rate
return [None for _ in range(4)]
def make_blocks(
block_fn,
channels,
block_repeats,
inplanes,
reduce_first=1,
output_stride=32,
down_kernel_size=1,
avg_down=False,
drop_block_rate=0.,
drop_path_rate=0.,
**kwargs
):
stages = []
feature_info = []
net_num_blocks = sum(block_repeats)
net_block_idx = 0
net_stride = 4
dilation = prev_dilation = 1
for stage_idx, (planes, num_blocks, db) in enumerate(zip(channels, block_repeats, drop_blocks(drop_block_rate))):
stage_name = f'layer{stage_idx + 1}' # never liked this name, but weight compat requires it
stride = 1 if stage_idx == 0 else 2
if net_stride >= output_stride:
dilation *= stride
stride = 1
else:
net_stride *= stride
downsample = None
if stride != 1 or inplanes != planes * block_fn.expansion:
down_kwargs = dict(
in_channels=inplanes,
out_channels=planes * block_fn.expansion,
kernel_size=down_kernel_size,
stride=stride,
dilation=dilation,
first_dilation=prev_dilation,
norm_layer=kwargs.get('norm_layer')
)
downsample = downsample_avg(**down_kwargs) if avg_down else downsample_conv(**down_kwargs)
block_kwargs = dict(reduce_first=reduce_first, dilation=dilation, drop_block=db, **kwargs)
blocks = []
for block_idx in range(num_blocks):
downsample = downsample if block_idx == 0 else None
stride = stride if block_idx == 0 else 1
block_dpr = drop_path_rate * net_block_idx / (net_num_blocks - 1) # stochastic depth linear decay rule
blocks.append(
block_fn(
inplanes, planes, stride, downsample, first_dilation=prev_dilation, drop_path=None, **block_kwargs
)
)
prev_dilation = dilation
inplanes = planes * block_fn.expansion
net_block_idx += 1
stages.append((stage_name, nn.Sequential(*blocks)))
feature_info.append(dict(num_chs=inplanes, reduction=net_stride, module=stage_name))
return stages, feature_info
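# Illustrative sketch (not called anywhere in this module): assembling the
# four ResNet-18 style stages with make_blocks(). The channel plan and block
# repeats follow the classic ResNet-18 layout; `inplanes=64` assumes a stem
# that outputs 64 channels.
def _example_resnet18_stages():
    stages, feature_info = make_blocks(
        BasicBlock,                      # two 3x3 convs per block, expansion 1
        channels=[64, 128, 256, 512],
        block_repeats=[2, 2, 2, 2],
        inplanes=64,
        norm_layer=nn.BatchNorm2d,
        act_layer=nn.ReLU,
    )
    body = nn.Sequential(*[module for _, module in stages])
    return body, feature_info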
class ResNet(nn.Module):
"""ResNet / ResNeXt / SE-ResNeXt / SE-Net
This class implements all variants of ResNet, ResNeXt, SE-ResNeXt, and SENet that
* have > 1 stride in the 3x3 conv layer of bottleneck
* have conv-bn-act ordering
This ResNet impl supports a number of stem and downsample options based on the v1c, v1d, v1e, and v1s
variants included in the MXNet Gluon ResNetV1b model. The C and D variants | |
    >>> TS = MatrixSymbol('T', 3, 3)
>>> P(Eq(YS[3], 2), Eq(YS[1], 1) & TransitionMatrixOf(YS, TS))
T[0, 2]*T[1, 0] + T[1, 1]*T[1, 2] + T[1, 2]*T[2, 2]
>>> P(Eq(Y[3], 2), Eq(Y[1], 1)).round(2)
0.36
Probabilities will be calculated based on indexes rather
than state names. For example, with the Sunny-Cloudy-Rainy
model with string state names:
>>> from sympy.core.symbol import Str
>>> Y = DiscreteMarkovChain("Y", [Str('Sunny'), Str('Cloudy'), Str('Rainy')], T)
>>> P(Eq(Y[3], 2), Eq(Y[1], 1)).round(2)
0.36
This gives the same answer as the ``[0, 1, 2]`` state space.
Currently, there is no support for state names within probability
and expectation statements. Here is a work-around using ``Str``:
>>> P(Eq(Str('Rainy'), Y[3]), Eq(Y[1], Str('Cloudy'))).round(2)
0.36
Symbol state names can also be used:
>>> sunny, cloudy, rainy = symbols('Sunny, Cloudy, Rainy')
>>> Y = DiscreteMarkovChain("Y", [sunny, cloudy, rainy], T)
>>> P(Eq(Y[3], rainy), Eq(Y[1], cloudy)).round(2)
0.36
Expectations will be calculated as follows:
>>> E(Y[3], Eq(Y[1], cloudy))
0.38*Cloudy + 0.36*Rainy + 0.26*Sunny
Probability of expressions with multiple RandomIndexedSymbols
can also be calculated provided there is only 1 RandomIndexedSymbol
in the given condition. It is always better to use Rational instead
of floating point numbers for the probabilities in the
transition matrix to avoid errors.
>>> from sympy import Gt, Le, Rational
>>> T = Matrix([[Rational(5, 10), Rational(3, 10), Rational(2, 10)], [Rational(2, 10), Rational(7, 10), Rational(1, 10)], [Rational(3, 10), Rational(3, 10), Rational(4, 10)]])
>>> Y = DiscreteMarkovChain("Y", [0, 1, 2], T)
>>> P(Eq(Y[3], Y[1]), Eq(Y[0], 0)).round(3)
0.409
>>> P(Gt(Y[3], Y[1]), Eq(Y[0], 0)).round(2)
0.36
>>> P(Le(Y[15], Y[10]), Eq(Y[8], 2)).round(7)
0.6963328
There is limited support for arbitrarily sized states:
>>> n = symbols('n', nonnegative=True, integer=True)
>>> T = MatrixSymbol('T', n, n)
>>> Y = DiscreteMarkovChain("Y", trans_probs=T)
>>> Y.state_space
Range(0, n, 1)
References
==========
.. [1] https://en.wikipedia.org/wiki/Markov_chain#Discrete-time_Markov_chain
.. [2] https://www.dartmouth.edu/~chance/teaching_aids/books_articles/probability_book/Chapter11.pdf
"""
index_set = S.Naturals0
def __new__(cls, sym, state_space=None, trans_probs=None):
# type: (Basic, tUnion[str, Symbol], tSequence, tUnion[MatrixBase, MatrixSymbol]) -> DiscreteMarkovChain
sym = _symbol_converter(sym)
state_space, trans_probs = MarkovProcess._sanity_checks(state_space, trans_probs)
obj = Basic.__new__(cls, sym, state_space, trans_probs)
indices = dict()
if isinstance(obj.number_of_states, Integer):
for index, state in enumerate(obj._state_index):
indices[state] = index
obj.index_of = indices
return obj
@property
def transition_probabilities(self) -> tUnion[MatrixBase, MatrixSymbol]:
"""
Transition probabilities of discrete Markov chain,
either an instance of Matrix or MatrixSymbol.
"""
return self.args[2]
def _transient2transient(self):
"""
Computes the one step probabilities of transient
states to transient states. Used in finding
fundamental matrix, absorbing probabilities.
"""
trans_probs = self.transition_probabilities
if not isinstance(trans_probs, ImmutableMatrix):
return None
m = trans_probs.shape[0]
trans_states = [i for i in range(m) if trans_probs[i, i] != 1]
t2t = [[trans_probs[si, sj] for sj in trans_states] for si in trans_states]
return ImmutableMatrix(t2t)
def _transient2absorbing(self):
"""
Computes the one step probabilities of transient
states to absorbing states. Used in finding
fundamental matrix, absorbing probabilities.
"""
trans_probs = self.transition_probabilities
if not isinstance(trans_probs, ImmutableMatrix):
return None
m, trans_states, absorb_states = \
trans_probs.shape[0], [], []
for i in range(m):
if trans_probs[i, i] == 1:
absorb_states.append(i)
else:
trans_states.append(i)
if not absorb_states or not trans_states:
return None
t2a = [[trans_probs[si, sj] for sj in absorb_states]
for si in trans_states]
return ImmutableMatrix(t2a)
def communication_classes(self) -> tList[tTuple[tList[Basic], Boolean, Integer]]:
"""
Returns the list of communication classes that partition
the states of the markov chain.
A communication class is defined to be a set of states
such that every state in that set is reachable from
every other state in that set. Due to its properties
this forms a class in the mathematical sense.
Communication classes are also known as recurrence
classes.
Returns
=======
classes
The ``classes`` are a list of tuples. Each
tuple represents a single communication class
with its properties. The first element in the
tuple is the list of states in the class, the
second element is whether the class is recurrent
and the third element is the period of the
communication class.
Examples
========
>>> from sympy.stats import DiscreteMarkovChain
>>> from sympy import Matrix
>>> T = Matrix([[0, 1, 0],
... [1, 0, 0],
... [1, 0, 0]])
>>> X = DiscreteMarkovChain('X', [1, 2, 3], T)
>>> classes = X.communication_classes()
>>> for states, is_recurrent, period in classes:
... states, is_recurrent, period
([1, 2], True, 2)
([3], False, 1)
From this we can see that states ``1`` and ``2``
communicate, are recurrent and have a period
of 2. We can also see state ``3`` is transient
with a period of 1.
Notes
=====
The algorithm used is of order ``O(n**2)`` where
``n`` is the number of states in the markov chain.
It uses Tarjan's algorithm to find the classes
themselves and then it uses a breadth-first search
algorithm to find each class's periodicity.
Most of the algorithm's components approach ``O(n)``
as the matrix becomes more and more sparse.
References
==========
.. [1] http://www.columbia.edu/~ww2040/4701Sum07/4701-06-Notes-MCII.pdf
.. [2] http://cecas.clemson.edu/~shierd/Shier/markov.pdf
.. [3] https://ujcontent.uj.ac.za/vital/access/services/Download/uj:7506/CONTENT1
.. [4] https://www.mathworks.com/help/econ/dtmc.classify.html
"""
n = self.number_of_states
T = self.transition_probabilities
if isinstance(T, MatrixSymbol):
raise NotImplementedError("Cannot perform the operation with a symbolic matrix.")
# begin Tarjan's algorithm
V = Range(n)
# don't use state names. Rather use state
# indexes since we use them for matrix
# indexing here and later onward
E = [(i, j) for i in V for j in V if T[i, j] != 0]
classes = strongly_connected_components((V, E))
# end Tarjan's algorithm
recurrence = []
periods = []
for class_ in classes:
# begin recurrent check (similar to self._check_trans_probs())
submatrix = T[class_, class_] # get the submatrix with those states
is_recurrent = S.true
rows = submatrix.tolist()
for row in rows:
if (sum(row) - 1) != 0:
is_recurrent = S.false
break
recurrence.append(is_recurrent)
# end recurrent check
# begin breadth-first search
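# The period of a class is the gcd of the lengths of its closed walks; each
# non-tree edge (i, j) found during the BFS witnesses a closed walk of length
# level[i] - level[j] + 1, and the gcd of those values is taken below.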
non_tree_edge_values = set()
visited = {class_[0]}
newly_visited = {class_[0]}
level = {class_[0]: 0}
current_level = 0
done = False # imitate a do-while loop
while not done: # runs at most len(class_) times
done = len(visited) == len(class_)
current_level += 1
# this loop and the while loop above run a combined len(class_) number of times.
# so this triple nested loop runs through each of the n states once.
for i in newly_visited:
# the loop below runs len(class_) number of times
# complexity is around about O(n * avg(len(class_)))
newly_visited = {j for j in class_ if T[i, j] != 0}
new_tree_edges = newly_visited.difference(visited)
for j in new_tree_edges:
level[j] = current_level
new_non_tree_edges = newly_visited.intersection(visited)
new_non_tree_edge_values = {level[i]-level[j]+1 for j in new_non_tree_edges}
non_tree_edge_values = non_tree_edge_values.union(new_non_tree_edge_values)
visited = visited.union(new_tree_edges)
# igcd needs at least 2 arguments
positive_ntev = {val_e for val_e in non_tree_edge_values if val_e > 0}
if len(positive_ntev) == 0:
periods.append(len(class_))
elif len(positive_ntev) == 1:
periods.append(positive_ntev.pop())
else:
periods.append(igcd(*positive_ntev))
# end breadth-first search
# convert back to the user's state names
classes = [[self._state_index[i] for i in class_] for class_ in classes]
return sympify(list(zip(classes, recurrence, periods)))
def fundamental_matrix(self):
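"""
Computes the fundamental matrix N = (I - Q)**-1, where Q is the one-step
transient-to-transient submatrix returned by _transient2transient(). The
(i, j) entry of N is the expected number of visits to transient state j,
starting from transient state i, before absorption.
"""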
Q = self._transient2transient()
if Q is None:
return None
I = eye(Q.shape[0])
if (I - Q).det() == 0:
raise ValueError("Fundamental matrix doesn't exists.")
return ImmutableMatrix((I - Q).inv().tolist())
def absorbing_probabilities(self):
"""
Computes the absorbing probabilities, i.e.,
the ij-th entry of the matrix denotes the
probability of Markov chain being absorbed
in state j starting from state i.
"""
R = self._transient2absorbing()
N = self.fundamental_matrix()
if R is None or N is None:
return None
return N*R
def absorbing_probabilites(self):
SymPyDeprecationWarning(
feature="absorbing_probabilites",
useinstead="absorbing_probabilities",
issue=20042,
deprecated_since_version="1.7"
).warn()
return self.absorbing_probabilities()
def is_regular(self):
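"""
Returns whether the chain is regular; computed here as having a single
communication class whose period is 1.
"""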
tuples = self.communication_classes()
if len(tuples) == 0:
return S.false # not defined for a 0x0 matrix
classes, _, periods = list(zip(*tuples))
return And(len(classes) == 1, periods[0] == 1)
def is_ergodic(self):
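"""
Returns whether the chain is ergodic; computed here as having a single
communication class, i.e. the chain is irreducible.
"""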
tuples = self.communication_classes()
if len(tuples) == 0:
return S.false # not defined for a 0x0 matrix
classes, _, _ = list(zip(*tuples))
return S(len(classes) == 1)
def is_absorbing_state(self, state):
trans_probs = self.transition_probabilities
if isinstance(trans_probs, ImmutableMatrix) and \
state < trans_probs.shape[0]:
return S(trans_probs[state, state]) is S.One
def is_absorbing_chain(self):
states, A, B, C = self.decompose()
r = A.shape[0]
return And(r > 0, A == Identity(r).as_explicit())
def stationary_distribution(self, condition_set=False) -> tUnion[ImmutableMatrix, ConditionSet, Lambda]:
"""
The stationary
import datetime
import io
import json
import logging
import os
import random
import uuid
from datetime import datetime as datetime_constructor
from datetime import timezone
import matplotlib
matplotlib.use("Agg")
import cv2
import matplotlib.pyplot as plt
import pandas as pd
import psycopg2
import pytz
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.http import HttpResponse, JsonResponse
from django.template import loader
from django.urls import reverse
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.base import TemplateView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic.list import ListView
from geopandas import geopandas
from material import *
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.transforms import Bbox
from metpy.interpolate import interpolate_to_grid
from pandas import json_normalize
from rest_framework import viewsets, status, generics, views
from rest_framework.decorators import api_view, permission_classes
from rest_framework.parsers import FileUploadParser
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from slugify import slugify
from tempestas_api import settings
from wx import serializers, tasks
from wx.decoders import insert_raw_data_pgia
from wx.decoders.hobo import read_file as read_file_hobo
from wx.decoders.toa5 import read_file
from wx.forms import StationForm
from wx.models import AdministrativeRegion, StationFile, Decoder, QualityFlag, DataFile, DataFileStation, \
DataFileVariable, StationImage, WMOStationType, WMORegion, WMOProgram, StationCommunication
from wx.models import Country, Unit, Station, Variable, DataSource, StationVariable, \
StationProfile, Document, Watershed, Interval
from wx.utils import get_altitude, get_watershed, get_district, get_interpolation_image, parse_float_value, \
parse_int_value
from .utils import get_raw_data, get_station_raw_data
logger = logging.getLogger('surface.urls')
# CONSTANT to be used in datetime to milliseconds conversion
EPOCH = datetime_constructor(1970, 1, 1, tzinfo=timezone.utc)
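# e.g. int((aware_utc_dt - EPOCH).total_seconds() * 1000) yields epoch milliseconds (illustrative).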
@csrf_exempt
def ScheduleDataExport(request):
if request.method != 'POST':
return HttpResponse(status=405)
json_body = json.loads(request.body)
station_ids = json_body['stations'] # array with station ids
data_source = json_body[
'source'] # one of raw_data, hourly_summary, daily_summary, monthly_summary or yearly_summary
start_date = json_body['start_datetime'] # in format %Y-%m-%d %H:%M:%S
end_date = json_body['end_datetime'] # in format %Y-%m-%d %H:%M:%S
variable_ids = json_body['variables'] # list of obj in format {id: Int, agg: Str}
data_interval_seconds = None
if data_source == 'raw_data' and 'data_interval' in json_body: # a number with the data interval in seconds. Only required for raw_data
data_interval_seconds = json_body['data_interval']
elif data_source == 'raw_data':
data_interval_seconds = 300
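# Illustrative example of the expected request body (all values hypothetical):
# {
#     "stations": [1, 2],
#     "source": "raw_data",
#     "start_datetime": "2020-01-01 00:00:00",
#     "end_datetime": "2020-01-02 00:00:00",
#     "variables": [10, 11],
#     "data_interval": 300
# }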
created_data_file_ids = []
start_date_utc = pytz.UTC.localize(datetime.datetime.strptime(start_date, '%Y-%m-%d %H:%M:%S'))
end_date_utc = pytz.UTC.localize(datetime.datetime.strptime(end_date, '%Y-%m-%d %H:%M:%S'))
if start_date_utc > end_date_utc:
message = 'The initial date must be earlier than the final date.'
return JsonResponse(data={"message": message}, status=status.HTTP_400_BAD_REQUEST)
days_interval = (end_date_utc - start_date_utc).days
data_source_dict = {
"raw_data": "Raw Data",
"hourly_summary": "Hourly Summary",
"daily_summary": "Daily Summary",
"monthly_summary": "Monthly Summary",
"yearly_summary": "Yearly Summary",
}
data_source_description = data_source_dict[data_source]
prepared_by = None
if request.user.first_name and request.user.last_name:
prepared_by = f'{request.user.first_name} {request.user.last_name}'
else:
prepared_by = request.user.username
for station_id in station_ids:
newfile = DataFile.objects.create(ready=False, initial_date=start_date_utc, final_date=end_date_utc,
source=data_source_description, prepared_by=prepared_by,
interval_in_seconds=data_interval_seconds)
DataFileStation.objects.create(datafile=newfile, station_id=station_id)
for variable_id in variable_ids:
variable = Variable.objects.get(pk=variable_id)
DataFileVariable.objects.create(datafile=newfile, variable=variable)
tasks.export_data.delay(station_id, data_source, start_date, end_date, variable_ids, newfile.id)
created_data_file_ids.append(newfile.id)
return HttpResponse(created_data_file_ids, status=status.HTTP_200_OK)
@api_view(('GET',))
def DataExportFiles(request):
files = []
for df in DataFile.objects.all().order_by('-created_at').values()[:100:1]:
if df['ready'] and df['ready_at']:
file_status = {'text': "Ready", 'value': 1}
elif df['ready_at']:
file_status = {'text': "Error", 'value': 2}
else:
file_status = {'text': "Processing", 'value': 0}
current_station_name = None
try:
current_data_file = DataFileStation.objects.get(datafile_id=df['id'])
current_station = Station.objects.get(pk=current_data_file.station_id)
current_station_name = current_station.name
except ObjectDoesNotExist:
current_station_name = "Station not found"
f = {
'id': df['id'],
'request_date': df['created_at'],
'ready_date': df['ready_at'],
'station': current_station_name,
'variables': [],
'status': file_status,
'initial_date': df['initial_date'],
'final_date': df['final_date'],
'source': {'text': df['source'],
'value': 0 if df['source'] == 'Raw Data' else (1 if df['source'] == 'Hourly Summary' else 2)},
'lines': df['lines'],
'prepared_by': df['prepared_by']
}
if f['ready_date'] is not None:
f['ready_date'] = f['ready_date']
for fv in DataFileVariable.objects.filter(datafile_id=df['id']).values():
f['variables'].append(Variable.objects.filter(pk=fv['variable_id']).values()[0]['name'])
files.append(f)
return Response(files, status=status.HTTP_200_OK)
def DownloadDataFile(request):
file_id = request.GET.get('id', None)
file_path = os.path.join('/data', 'exported_data', str(file_id) + '.csv')
if os.path.exists(file_path):
with open(file_path, 'rb') as fh:
response = HttpResponse(fh.read(), content_type="text/csv")
response['Content-Disposition'] = 'inline; filename=' + os.path.basename(file_path)
return response
return JsonResponse({}, status=status.HTTP_404_NOT_FOUND)
def DeleteDataFile(request):
file_id = request.GET.get('id', None)
df = DataFile.objects.get(pk=file_id)
DataFileStation.objects.filter(datafile=df).delete()
DataFileVariable.objects.filter(datafile=df).delete()
df.delete()
file_path = os.path.join('/data', 'exported_data', str(file_id) + '.csv')
if os.path.exists(file_path):
os.remove(file_path)
return JsonResponse({}, status=status.HTTP_200_OK)
def GetInterpolationData(request):
start_datetime = request.GET.get('start_datetime', None)
end_datetime = request.GET.get('end_datetime', None)
variable_id = request.GET.get('variable_id', None)
agg = request.GET.get('agg', "instant")
source = request.GET.get('source', "raw_data")
quality_flags = request.GET.get('quality_flags', None)
where_query = ""
if source == "raw_data":
dt_query = "datetime"
value_query = "measured"
source_query = "raw_data"
if quality_flags:
try:
[int(qf) for qf in quality_flags.split(',')]
except ValueError:
return JsonResponse({"message": "Invalid quality_flags value."}, status=status.HTTP_400_BAD_REQUEST)
where_query = f" measured != {settings.MISSING_VALUE} AND quality_flag IN ({quality_flags}) AND "
else:
where_query = f" measured != {settings.MISSING_VALUE} AND "
else:
with connection.cursor() as cursor:
cursor.execute("""
SELECT sampling_operation_id
FROM wx_variable
WHERE id=%(variable_id)s;
""",
params={'variable_id': variable_id}
)
sampling_operation = cursor.fetchone()[0]
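# Pick the summary column to aggregate from the variable's sampling operation;
# the ids below appear to mean: 6/7 accumulation (sum), 3 minimum, 4 maximum,
# anything else falls back to the average column.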
if sampling_operation in [6, 7]:
value_query = "sum_value"
elif sampling_operation == 3:
value_query = "min_value"
elif sampling_operation == 4:
value_query = "max_value"
else:
value_query = "avg_value"
if source == "hourly":
dt_query = "datetime"
source_query = "hourly_summary"
elif source == "daily":
dt_query = "day"
source_query = "daily_summary"
elif source == "monthly":
dt_query = "date"
source_query = "monthly_summary"
elif source == "yearly":
dt_query = "date"
source_query = "yearly_summary"
if agg == "instant":
where_query += "variable_id=%(variable_id)s AND " + dt_query + "=%(datetime)s"
params = {'datetime': start_datetime, 'variable_id': variable_id}
else:
where_query += "variable_id=%(variable_id)s AND " + dt_query + " >= %(start_datetime)s AND " + dt_query + " <= %(end_datetime)s"
params = {'start_datetime': start_datetime, 'end_datetime': end_datetime, 'variable_id': variable_id}
with connection.cursor() as cursor:
cursor.execute("""
SELECT a.station_id,b.name,b.code,b.latitude,b.longitude,a.""" + value_query + """ as measured
FROM """ + source_query + """ a INNER JOIN wx_station b ON a.station_id=b.id
WHERE """ + where_query + ";",
params=params
)
climate_data = {}
# if agg == "instant":
raw_data = cursor.fetchall()
climate_data['data'] = []
for item in raw_data:
climate_data['data'].append({
'station_id': item[0],
'name': item[1],
'code': item[2],
'latitude': item[3],
'longitude': item[4],
'measured': item[5],
})
if agg != "instant" and len(raw_data) > 0:
columns = ['station_id', 'name', 'code', 'latitude', 'longitude', 'measured']
df_climate = json_normalize([
dict(zip(columns, row))
for row in raw_data
])
climate_data['data'] = json.loads(
df_climate.groupby(['station_id', 'name', 'code', 'longitude', 'latitude']).agg(
agg).reset_index().sort_values('name').to_json(orient="records"))
return JsonResponse(climate_data)
def GetInterpolationImage(request):
start_datetime = request.GET.get('start_datetime', None)
end_datetime = request.GET.get('end_datetime', None)
variable_id = request.GET.get('variable_id', None)
cmap = request.GET.get('cmap', 'Spectral_r')
hres = request.GET.get('hres', 0.01)
minimum_neighbors = request.GET.get('minimum_neighbors', 1)
search_radius = request.GET.get('search_radius', 0.7)
agg = request.GET.get('agg', "instant")
source = request.GET.get('source', "raw_data")
vmin = request.GET.get('vmin', 0)
vmax = request.GET.get('vmax', 30)
quality_flags = request.GET.get('quality_flags', None)
stations_df = pd.read_sql_query("""
SELECT id,name,alias_name,code,latitude,longitude
FROM wx_station
WHERE longitude!=0;
""",
con=connection
)
stations = geopandas.GeoDataFrame(
stations_df, geometry=geopandas.points_from_xy(stations_df.longitude, stations_df.latitude))
stations.crs = 'epsg:4326'
stands_llat = settings.SPATIAL_ANALYSIS_INITIAL_LATITUDE
stands_llon = settings.SPATIAL_ANALYSIS_INITIAL_LONGITUDE
stands_ulat = settings.SPATIAL_ANALYSIS_FINAL_LATITUDE
stands_ulon = settings.SPATIAL_ANALYSIS_FINAL_LONGITUDE
where_query = ""
if source == "raw_data":
dt_query = "datetime"
value_query = "measured"
source_query = "raw_data"
if quality_flags:
try:
[int(qf) for qf in quality_flags.split(',')]
except ValueError:
return JsonResponse({"message": "Invalid quality_flags value."}, status=status.HTTP_400_BAD_REQUEST)
where_query = f" measured != {settings.MISSING_VALUE} AND quality_flag IN ({quality_flags}) AND "
else:
where_query = f" measured != {settings.MISSING_VALUE} AND "
else:
with connection.cursor() as cursor:
cursor.execute("""
SELECT sampling_operation_id
FROM wx_variable
WHERE id=%(variable_id)s;
""",
params={'variable_id': variable_id}
)
sampling_operation = cursor.fetchone()[0]
if sampling_operation in [6, 7]:
value_query = "sum_value"
elif sampling_operation == 3:
value_query = "min_value"
elif sampling_operation == 4:
value_query = "max_value"
else:
value_query = "avg_value"
if source == "hourly":
dt_query = "datetime"
source_query = "hourly_summary"
elif source == "daily":
dt_query = "day"
source_query = "daily_summary"
elif source == "monthly":
dt_query = "date"
source_query = "monthly_summary"
elif source == "yearly":
dt_query = "date"
source_query = "yearly_summary"
if agg == "instant":
where_query += "variable_id=%(variable_id)s AND " + dt_query + "=%(datetime)s"
params = {'datetime': start_datetime, 'variable_id': variable_id}
else:
where_query += "variable_id=%(variable_id)s AND " + dt_query + " >= %(start_datetime)s AND " + dt_query + " <= %(end_datetime)s"
params = {'start_datetime': start_datetime, 'end_datetime': end_datetime, 'variable_id': variable_id}
climate_data = pd.read_sql_query(
"SELECT station_id,variable_id," + dt_query + "," + value_query + """
FROM """ + source_query + """
WHERE """ + where_query + ";",
params=params,
con=connection
)
if len(climate_data) == 0:
with open("/surface/static/images/no-interpolated-data.png", "rb") as f:
img_data = f.read()
return HttpResponse(img_data, content_type="image/jpeg")
df_merged = pd.merge(left=climate_data, right=stations, how='left', left_on='station_id', right_on='id')
df_climate = df_merged[["station_id", dt_query, "longitude", "latitude", value_query]]
if agg != "instant":
df_climate = df_climate.groupby(['station_id', 'longitude', 'latitude']).agg(agg).reset_index()
gx, gy, img = interpolate_to_grid(
df_climate["longitude"],
df_climate["latitude"],
df_climate[value_query],
interp_type='cressman',
minimum_neighbors=int(minimum_neighbors),
hres=float(hres),
search_radius=float(search_radius),
boundary_coords={'west': stands_llon, 'east': stands_ulon, 'south': stands_llat, 'north': stands_ulat}
)
fig = plt.figure(frameon=False)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(img, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax)
fname = str(uuid.uuid4())
fig.savefig("/surface/static/images/" + fname + ".png", dpi='figure', format='png', transparent=True,
bbox_inches=Bbox.from_bounds(2, 0, 2.333, 4.013))
image1 = cv2.imread("/surface/static/images/" + fname + ".png", cv2.IMREAD_UNCHANGED)
image2 = cv2.imread(settings.SPATIAL_ANALYSIS_SHAPE_FILE_PATH, cv2.IMREAD_UNCHANGED)
image1 = cv2.resize(image1, dsize=(image2.shape[1], image2.shape[0]))
for i in range(image1.shape[0]):
Specify the Security Group category name. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: list[Right]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.security_get_security_group_rights_for_current_user_by_category_name_v1_with_http_info(categoryname, **kwargs) # noqa: E501
def security_get_security_group_rights_for_current_user_by_category_name_v1_with_http_info(self, categoryname, **kwargs): # noqa: E501
"""Get current user's permissions by Security Group category by name # noqa: E501
Operation to get permissions for the current user's Security Group by category. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.security_get_security_group_rights_for_current_user_by_category_name_v1_with_http_info(categoryname, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str categoryname: Specify the Security Group category name. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(list[Right], status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['categoryname'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method security_get_security_group_rights_for_current_user_by_category_name_v1" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'categoryname' is set
if self.api_client.client_side_validation and ('categoryname' not in local_var_params or # noqa: E501
local_var_params['categoryname'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `categoryname` when calling `security_get_security_group_rights_for_current_user_by_category_name_v1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'categoryname' in local_var_params:
path_params['categoryname'] = local_var_params['categoryname'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/V1/getsecuritygrouprightsforcurrentuserbycategoryname/categoryname/{categoryname}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Right]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def security_get_security_group_rights_for_current_user_by_category_name_v2(self, categoryname, **kwargs): # noqa: E501
"""Get current user's permissions by Security Group category by name # noqa: E501
Operation to get permissions for the current user's Security Group by category. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.security_get_security_group_rights_for_current_user_by_category_name_v2(categoryname, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str categoryname: Specify the Security Group category name. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: SecurityRightsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.security_get_security_group_rights_for_current_user_by_category_name_v2_with_http_info(categoryname, **kwargs) # noqa: E501
def security_get_security_group_rights_for_current_user_by_category_name_v2_with_http_info(self, categoryname, **kwargs): # noqa: E501
"""Get current user's permissions by Security Group category by name # noqa: E501
Operation to get permissions for the current user's Security Group by category. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.security_get_security_group_rights_for_current_user_by_category_name_v2_with_http_info(categoryname, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str categoryname: Specify the Security Group category name. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(SecurityRightsResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['categoryname'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method security_get_security_group_rights_for_current_user_by_category_name_v2" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'categoryname' is set
if self.api_client.client_side_validation and ('categoryname' not in local_var_params or # noqa: E501
local_var_params['categoryname'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `categoryname` when calling `security_get_security_group_rights_for_current_user_by_category_name_v2`") # noqa: E501
collection_formats = {}
path_params = {}
if 'categoryname' in local_var_params:
path_params['categoryname'] = local_var_params['categoryname'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/V2/getsecuritygrouprightsforcurrentuserbycategoryname/categoryname/{categoryname}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SecurityRightsResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def security_get_security_groups_v1(self, **kwargs): # noqa: E501
"""Get all available Security Groups # noqa: E501
Operation to get IDs, names, and descriptions for all available Security Groups. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.security_get_security_groups_v1(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: SecurityGroupResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.security_get_security_groups_v1_with_http_info(**kwargs) # noqa: E501
def security_get_security_groups_v1_with_http_info(self, **kwargs): # noqa: E501
"""Get all available Security Groups # noqa: E501
Operation to get IDs, names, and descriptions for all available Security Groups. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.security_get_security_groups_v1_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(SecurityGroupResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method security_get_security_groups_v1" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/V1/getsecuritygroups', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SecurityGroupResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def security_get_security_groups_v2(self, **kwargs): # noqa: E501
"""Get all available Security Groups # noqa: E501
Operation to get IDs, names, and descriptions for all available Security Groups. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.security_get_security_groups_v2(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided,
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import children
class route(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-opstest - based on the path /opstest-state/routes/route. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__destPrefix','__entryMultipathIndex','__prefixLength','__gateway','__outInterfaceName','__outLabel','__protocol','__vifIndex','__metric','__useCount','__children',)
_yang_name = 'route'
_rest_name = 'route'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__vifIndex = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="vifIndex", rest_name="vifIndex", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='uint32', is_config=False)
self.__protocol = YANGDynClass(base=unicode, is_leaf=True, yang_name="protocol", rest_name="protocol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='string', is_config=False)
self.__entryMultipathIndex = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="entryMultipathIndex", rest_name="entryMultipathIndex", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='uint32', is_config=False)
self.__destPrefix = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="destPrefix", rest_name="destPrefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='inet:ipv4-address', is_config=False)
self.__outInterfaceName = YANGDynClass(base=unicode, is_leaf=True, yang_name="outInterfaceName", rest_name="outInterfaceName", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='string', is_config=False)
self.__outLabel = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="outLabel", rest_name="outLabel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='uint32', is_config=False)
self.__children = YANGDynClass(base=YANGListType("keyid",children.children, yang_name="children", rest_name="children", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='keyid', extensions={u'tailf-common': {u'callpoint': u'MplstestRouteChildren'}}), is_container='list', yang_name="children", rest_name="children", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'MplstestRouteChildren'}}, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='list', is_config=False)
self.__useCount = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="useCount", rest_name="useCount", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='uint32', is_config=False)
self.__prefixLength = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="prefixLength", rest_name="prefixLength", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='uint32', is_config=False)
self.__gateway = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="gateway", rest_name="gateway", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='inet:ipv4-address', is_config=False)
self.__metric = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="metric", rest_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='uint32', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'opstest-state', u'routes', u'route']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'opstest-state', u'routes', u'route']
def _get_destPrefix(self):
"""
Getter method for destPrefix, mapped from YANG variable /opstest_state/routes/route/destPrefix (inet:ipv4-address)
"""
return self.__destPrefix
def _set_destPrefix(self, v, load=False):
"""
Setter method for destPrefix, mapped from YANG variable /opstest_state/routes/route/destPrefix (inet:ipv4-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_destPrefix is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_destPrefix() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="destPrefix", rest_name="destPrefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='inet:ipv4-address', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """destPrefix must be of a type compatible with inet:ipv4-address""",
'defined-type': "inet:ipv4-address",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="destPrefix", rest_name="destPrefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='inet:ipv4-address', is_config=False)""",
})
self.__destPrefix = t
if hasattr(self, '_set'):
self._set()
def _unset_destPrefix(self):
self.__destPrefix = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="destPrefix", rest_name="destPrefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='inet:ipv4-address', is_config=False)
def _get_entryMultipathIndex(self):
"""
Getter method for entryMultipathIndex, mapped from YANG variable /opstest_state/routes/route/entryMultipathIndex (uint32)
"""
return self.__entryMultipathIndex
def _set_entryMultipathIndex(self, v, load=False):
"""
Setter method for entryMultipathIndex, mapped from YANG variable /opstest_state/routes/route/entryMultipathIndex (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_entryMultipathIndex is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_entryMultipathIndex() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="entryMultipathIndex", rest_name="entryMultipathIndex", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """entryMultipathIndex must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="entryMultipathIndex", rest_name="entryMultipathIndex", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='uint32', is_config=False)""",
})
self.__entryMultipathIndex = t
if hasattr(self, '_set'):
self._set()
def _unset_entryMultipathIndex(self):
self.__entryMultipathIndex = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="entryMultipathIndex", rest_name="entryMultipathIndex", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='uint32', is_config=False)
def _get_prefixLength(self):
"""
Getter method for prefixLength, mapped from YANG variable /opstest_state/routes/route/prefixLength (uint32)
"""
return self.__prefixLength
def _set_prefixLength(self, v, load=False):
"""
Setter method for prefixLength, mapped from YANG variable /opstest_state/routes/route/prefixLength (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_prefixLength is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_prefixLength() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="prefixLength", rest_name="prefixLength", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """prefixLength must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="prefixLength", rest_name="prefixLength", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='uint32', is_config=False)""",
})
self.__prefixLength = t
if hasattr(self, '_set'):
self._set()
def _unset_prefixLength(self):
self.__prefixLength = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="prefixLength", rest_name="prefixLength", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='uint32', is_config=False)
def _get_gateway(self):
"""
Getter method for gateway, mapped from YANG variable /opstest_state/routes/route/gateway (inet:ipv4-address)
"""
return self.__gateway
def _set_gateway(self, v, load=False):
"""
Setter method for gateway, mapped from YANG variable /opstest_state/routes/route/gateway (inet:ipv4-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_gateway is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_gateway() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="gateway", rest_name="gateway", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='inet:ipv4-address', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """gateway must be of a type compatible with inet:ipv4-address""",
'defined-type': "inet:ipv4-address",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="gateway", rest_name="gateway", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='inet:ipv4-address', is_config=False)""",
})
self.__gateway = t
if hasattr(self, '_set'):
self._set()
def _unset_gateway(self):
self.__gateway = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="gateway", rest_name="gateway", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='inet:ipv4-address', is_config=False)
def _get_outInterfaceName(self):
"""
Getter method for outInterfaceName, mapped from YANG variable /opstest_state/routes/route/outInterfaceName (string)
"""
return self.__outInterfaceName
def _set_outInterfaceName(self, v, load=False):
"""
Setter method for outInterfaceName, mapped from YANG variable /opstest_state/routes/route/outInterfaceName (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_outInterfaceName is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_outInterfaceName() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="outInterfaceName", rest_name="outInterfaceName", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """outInterfaceName must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="outInterfaceName", rest_name="outInterfaceName", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='string', is_config=False)""",
})
self.__outInterfaceName = t
if hasattr(self, '_set'):
self._set()
def _unset_outInterfaceName(self):
self.__outInterfaceName = YANGDynClass(base=unicode, is_leaf=True, yang_name="outInterfaceName", rest_name="outInterfaceName", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='string', is_config=False)
def _get_outLabel(self):
"""
Getter method for outLabel, mapped from YANG variable /opstest_state/routes/route/outLabel (uint32)
"""
return self.__outLabel
def _set_outLabel(self, v, load=False):
"""
Setter method for outLabel, mapped from YANG variable /opstest_state/routes/route/outLabel (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_outLabel is considered as a private
method. Backends looking to populate this
# -*- encoding: utf-8 -*-
from __future__ import print_function
import sys
from copy import deepcopy
from collections import defaultdict
from itertools import chain, izip
import jsparser
def pairs(iterable):
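"""Yield consecutive overlapping pairs (a, b), (b, c), ... from iterable."""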
it = iter(iterable)
e1 = next(it)
for e2 in it:
yield e1, e2
e1 = e2
def childs(node):
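"""Yield (child, key) pairs: first the node's known child attributes, then its indexed children."""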
for a in BaseVisitor.CHILD_ATTRS:
child = getattr(node, a, None)
# a='value' may contain something other than a node.
if child and isinstance(child, jsparser.Node):
yield child, a
for i, n in enumerate(node): yield n, i
jsparser.Node._childs = property(childs)
class GraphNode(object):
def __init__(self, name):
self.name = name
def __str__(self):
l = [x for x in dir(self) if x.startswith('g_')]
if len(l):
return '%s [%s]' % (self.name, ','.join(map(lambda x: '%s="%s"' % (x[2:], getattr(self, x)), l)))
return self.name
__repr__ = __str__
class BeginNode(GraphNode):
def __init__(self):
super(BeginNode, self).__init__('begin')
g_shape = 'ellipse'
class EndNode(GraphNode):
def __init__(self):
super(EndNode, self).__init__('end')
g_shape = 'ellipse'
class ConditionNode(GraphNode):
def __init__(self, name):
super(ConditionNode, self).__init__(name)
g_shape = 'diamond'
class ActionNode(GraphNode):
def __init__(self, name):
super(ActionNode, self).__init__(name)
class GeneralRequestNode(ActionNode):
def __init__(self, name):
super(GeneralRequestNode, self).__init__(name)
g_fillcolor = '#99CC33'
g_style = 'filled'
class SubGraph(object):
def __init__(self, name, parent=None):
assert parent == None or isinstance(parent, SubGraph), type(parent)
self.name = name
self.nodes = [] # GraphNode
self.subgraphs = [] # SubGraph
self.__actions = [] # GraphNode | SubGraph
self.parent = parent
if parent:
parent.subgraphs.append(self)
######################################
# Getters
######################################
@property
def entryPoint(self):
return self.__actions[0]
@property
def endPoint(self):
return self.__actions[-1]
@property
def edges(self):
return pairs(self.__actions)
def subgraph(self, name):
sg = self
while sg is not None:
for g in sg.subgraphs:
if g.name == name: return g
sg = sg.parent
raise Exception('No graph with name '+name)
@property
def _actions(self):
return chain(self.__actions, (self.__endPoint,))
######################################
def addAction(self, name):
self._addNode(ActionNode(name))
def addCall(self, g):
self.__add(g)
def _addNode(self, node):
self.nodes.append(node)
self.__add(node)
def __add(self, endNode):
# Mark the first action in the chain.
if not len(self.__actions) and isinstance(endNode, ActionNode):
endNode.g_style = 'filled'
endNode.g_fillcolor = '#99CC33'
self.__actions.append(endNode)
def toDot(self, indent=0, out=sys.stdout):
i = ' '*indent
print('%s// Function %s()' % (i, self.name), file=out)
print(self._header() % (i, self.name), file=out)
print('%s node [shape="rectangle"];' % i, file=out)
print('%s label="%s";' % (i, self._label()), file=out)
if len(self.nodes):
print(file=out)
for n in self.nodes:
print('%s %s;' % (i, n), file=out)
print(file=out)
for n1, n2 in self.edges:
try:
while isinstance(n1, SubGraph): n1 = n1.endPoint
while isinstance(n2, SubGraph): n2 = n2.entryPoint
print('%s %s -> %s;' % (i, n1.name, n2.name), file=out)
except IndexError: break
if len(self.subgraphs):
print(file=out)
for g in self.subgraphs:
g.toDot(indent+1, out)
print('%s}' % i, file=out)
def _header(self):
return '%ssubgraph cluster%s {';
def _label(self):
return 'function %s()' % self.name
# @property
# def nodes(self):
# for a in self.__actions:
# if isinstance(a, GraphNode): yield a
class Graph(SubGraph):
def __init__(self):
super(Graph, self).__init__('Script')
self._addNode(BeginNode())
self.nodes.append(EndNode())
def _header(self):
return '%sdigraph %s {';
def _label(self):
return self.name
##########################################################################
class BaseVisitor(object):
# value is used by RETURN
CHILD_ATTRS = ['condition', 'thenPart', 'elsePart', 'expression', 'body', 'initializer', 'value']
def __noop(self, node): pass
def visit(self, node):
# Get this class's method of the form visit_<node_type> that is
# applied to the given node.
call = lambda n: getattr(self, "visit_%s" % n.type, self.__noop)(n)
r = call(node)
if r: return r
# Traverse all inner nodes that contain actions
for n,i in node._childs:
if self.filter(n):
r = self.visit(n)
if r: return r
call = lambda n: getattr(self, "after_visit_%s" % n.type, self.__noop)(n)
return call(node)
def filter(self, node):
return True
@staticmethod
def _name(node):
if node.type in ('IDENTIFIER', 'NUMBER'): return node.value
if node.type == 'FUNCTION': return getattr(node, 'name', '<anonymous>')
if node.type == 'CALL': return BaseVisitor._name(node[0])
if node.type == 'DOT': return '.'.join((n.value for n in node))
if node.type == 'STRING': return '"%s"' % node.value
return ''
##########################################################################
# Prints the node tree in a compact form.
class PrintVisitor(BaseVisitor):
def __init__(self, node=None):
self.__indent = 0
if node:
self.visit(node)
def visit(self, node):
print('%s%s %s' % (' '*self.__indent, node.type, self._name(node)))
self.__indent += 1
super(PrintVisitor, self).visit(node)
self.__indent -= 1
# Adds a reference to the parent node to every node of the script's tree.
class AddParentVisitor(BaseVisitor):
def __init__(self, node):
super(AddParentVisitor, self).__init__()
self.__current = None
node._parent = None
self.visit(node)
self.addChangeHook()
def visit(self, node):
node._parent = self.__current
self.__current = node
super(AddParentVisitor, self).visit(node)
self.__current = node._parent
@staticmethod
def addChangeHook():
old__setattr__ = jsparser.Node.__setattr__
def hook(self, name, obj):
old__setattr__(self, name, obj)
if name in BaseVisitor.CHILD_ATTRS and isinstance(obj, jsparser.Node):
# print('%s.%s <= %s' % (getattr(self, 'type', None), name, getattr(obj, 'type', None)))
old__setattr__(obj, '_parent', self)
jsparser.Node.__setattr__ = hook
# jsparser.Node.parent = property(lambda self: self._parent)
def where(node):
if not node._parent: return None
try:
return next(i for i, n in enumerate(node._parent) if n is node)
# return node._parent.index(node)
# except ValueError:
except StopIteration:
for attr in BaseVisitor.CHILD_ATTRS:
child = getattr(node._parent, attr, None)
if child is node:
return attr
raise ValueError()
jsparser.Node._where = property(where)
def removeNode(node, nodeForRemove):
i = nodeForRemove._where
if isinstance(i, int):
del node[i]
else:
setattr(node, i, None)
jsparser.Node._removeNode = removeNode
jsparser.Node._neighbours = property(pairs)
def equals(node1, node2):
if node1 is node2: return True
if node1 is None and node2 is not None: return False
if node1.type != node2.type: return False
if len(node1) != len(node2): return False
name1 = getattr(node1, 'name', None)
name2 = getattr(node2, 'name', None)
if name1 != name2: return False
name1 = getattr(node1, 'value', None)
name2 = getattr(node2, 'value', None)
if name1 != name2: return False
# return node1 == node2
for n1, n2 in izip(node1, node2):
if not equals(n1, n2): return False
return True
jsparser.Node._equals = equals
# jsparser.Node.__eq__ = equals
# Refactors the AST to make building the code-flow graph easier.
class RefactorVisitor(BaseVisitor):
class Tokenizer(object):
token = None
lineno= -1
def __init__(self, node):
self.visit(node)
def visit(self, node):
self.__chainIf(node)
super(RefactorVisitor, self).visit(node)
def visit_IF(self, node):
# Remove the negation from the condition by swapping the branches.
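#   if (!cond) A else B   ==>   if (cond) B else A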
if node.condition.type == 'NOT':
node.thenPart, node.elsePart = node.elsePart, node.thenPart
node.condition = node.condition[0]  # The condition wrapped by NOT
node.condition._parent = node
return self.visit_IF(node)
elif node.condition.type == 'AND':
self.__splitANDNode(node)
return self.visit_IF(node)
# elif node.condition.type == 'OR':
# self.__splitORNode(node)
# return self.visit_IF(node)
elif node.condition.type == 'CALL':
self.__popCALL(node)
return self.visit_IF(node)
elif node.condition.type == 'TRUE':
self.__replaceNode(node._parent, node, node.thenPart)
self.visit(node._parent)
return True
elif node.condition.type in ('FALSE', 'NULL'):
self.__replaceNode(node._parent, node, node.elsePart)
self.visit(node._parent)
return True
def visit_WHILE(self, node):
if node.condition.type in ('NUMBER', 'TRUE', 'FALSE', 'NULL'):
return
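# Rewrite the loop so that its condition becomes a constant 'true' and the real
# test is an explicit if/break inside the body:
#   while (cond) body   ==>   while (true) { if (cond) body; else break; }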
node.body = self.__createNode('if',
condition=node.condition,
thenPart=node.body,
elsePart=self.__createNode('break')
)
node.condition = self.__createNode('true')
def visit_DO(self, node):
if node.condition.type in ('NUMBER', 'TRUE', 'FALSE', 'NULL'):
return
node.body = self.__createNode('BLOCK',
node.body,
self.__createNode('if',
condition=node.condition,
thenPart =self.__createNode('break')
)
)
node.condition = self.__createNode('true')
def __splitANDNode(self, oldIf):
"""
# Refactoring example
def test(x): # Before
if x>1 and x<3: print('then')
else: print('else')
def test(x): # After
if x>1: # oldIf
if x<3: print('then')# newIf
else: print('else')
else:
print('else')
"""
# It would be simpler to create the outer if, since the inner one is the original
# if carrying the second half of the condition. But that would make it impossible
# to push the changes up the tree, so a new inner if is created instead.
newIf = self.__createNode('if',
condition= oldIf.condition[1],
thenPart = oldIf.thenPart,
elsePart = deepcopy(oldIf.elsePart)
)
# Adjust the outer if
oldIf.condition = oldIf.condition[0]
oldIf.thenPart = newIf
def __splitORNode(self, oldIf):
"""
# Refactoring example
def test(x): # Before
if x<1 or x>3: print('then')
else: print('else')
def test(x): # After
if x<1: print('then')# oldIf
else:
if x>3: print('then')# newIf
else: print('else')
"""
newIf = self.__createNode('if',
condition = oldIf.condition[1],
thenPart = deepcopy(oldIf.thenPart),
elsePart = oldIf.elsePart
)
oldIf.condition = oldIf.condition[0]
oldIf.elsePart = newIf
def __popCALL(self, oldIf):
"""
# Refactoring example
def test(f): # Before
if f(): print('then')
else: print('else')
def test(f): # After
# Dummy if: only the children of a node may be modified,
# and here one extra node is added at the same level.
if True:# oldIf
x = f()
if x: print('then')# newIf
else: print('else')
"""
# Declare a variable and initialize it with the original condition
var = self.__createNode('var',
self.__createNode('IDENTIFIER', value='lastResult', initializer=oldIf.condition)
)
# Create a new if that checks that variable
newIf = self.__createNode('if',
condition = self.__createNode('IDENTIFIER', value='lastResult'),
thenPart = oldIf.thenPart,
elsePart = oldIf.elsePart
)
# Replace the old condition and attach the two newly created pieces of code.
oldIf.condition = self.__createNode('true')
oldIf.thenPart = self.__createNode('BLOCK', var, newIf)
oldIf.elsePart = None
# If two if statements follow one another, the first has no else part, and both
# test the same variable for equality, then the second if can be attached to the
# first as its else part, provided the tested variable is not modified in the
# body of the first if (that check is not performed here).
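# For example (assuming the variable is not reassigned in between):
#   if (x == 1) a();              if (x == 1) a();
#   if (x == 2) b();      ==>     else if (x == 2) b();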
def __chainIf(self, node):
for n1, n2 in reversed(list(node._neighbours)):
if n1.type == 'IF' and n2.type == 'IF':
PrintVisitor(n1.condition)
PrintVisitor(n2.condition)
print('---------------')
if n1.elsePart | |
]
pie_left_list = [
'left',
'frontleft',
'front',
'frontright',
'right',
'backright',
'back',
'backleft',
]
pie_right_list = [
'right',
'backright',
'back',
'backleft',
'left',
'frontleft',
'front',
'frontright',
]
viewpoint_tag_dict = {
'zebra_grevys': pie_right_list,
'zebra_plains': pie_left_list,
'giraffe_masai': pie_left_list,
}
pie_label_corrected_list = list(
map(str, ['Correct', '+45', '+90', '+135', '+180', '+225', '+270', '+315'])
)
pie_values_corrected_list = [
(
species_nice_dict[species],
[viewpoint_dict[species][_] for _ in viewpoint_tag_dict[species]],
)
for species in species_tag_list
]
gid_list = ibs.get_valid_gids()
note_list = ibs.get_image_notes(gid_list)
aids_list = ibs.get_image_aids(gid_list)
viewpoints_list = ut.unflat_map(ibs.get_annot_viewpoints, aids_list)
dataset_tag_list = ['GGR', 'GZGC']
pie_label_images_list = ['Correct Viewpoint', '+/- 45 Viewpoint', 'Unused']
pie_values_images_dict = {dataset_tag: [0, 0, 0] for dataset_tag in dataset_tag_list}
allowed_viewpoint_dict = {
'GGR': ['right', 'frontright', 'backright'],
'GZGC': ['left', 'frontleft', 'backleft'],
}
for note, viewpoint_list in list(zip(note_list, viewpoints_list)):
dataset_tag = 'GGR' if 'GGR' in note else 'GZGC'
allowed_viewpoint_list = allowed_viewpoint_dict[dataset_tag]
found = False
for index, allowed_viewpoint in enumerate(allowed_viewpoint_list):
if allowed_viewpoint in viewpoint_list:
found = True
if index == 0:
pie_values_images_dict[dataset_tag][0] += 1
else:
pie_values_images_dict[dataset_tag][1] += 1
break
if not found:
pie_values_images_dict[dataset_tag][2] += 1
pie_values_images_list = [(_, pie_values_images_dict[_]) for _ in dataset_tag_list]
nid_list = ibs.get_valid_nids()
aids_list = ibs.get_name_aids(nid_list)
species_list = map(
list, map(set, ut.unflat_map(ibs.get_annot_species_texts, aids_list))
)
species_list = [None if len(_) != 1 else _[0] for _ in species_list]
num_bins = 15
histogram_dict = {}
for nid, aids, species in list(zip(nid_list, aids_list, species_list)):
if species not in histogram_dict:
histogram_dict[species] = {}
count = len(aids)
count = min(count, num_bins)
if count not in histogram_dict[species]:
histogram_dict[species][count] = 0
histogram_dict[species][count] += 1
histogram_bins = list(range(1, num_bins + 1))
bar_label_list = [
'%s+' % (bin_,) if bin_ == histogram_bins[-1] else '%s' % (bin_,)
for bin_ in histogram_bins
]
bar_values_dict = {}
for species in histogram_dict:
bar_values_dict[species] = []
for bin_ in histogram_bins:
value = histogram_dict[species].get(bin_, 0)
if species == 'zebra_plains':
value2 = histogram_dict['giraffe_masai'].get(bin_, 0)
value += value2
bar_values_dict[species].append(value)
# Get number of annotations per name as a histogram for each species
embedded = dict(globals(), **locals())
return appf.template('view', 'advanced1', __wrapper_header__=False, **embedded)
@register_route('/view/advanced/2/', methods=['GET'])
def view_advanced2(**kwargs):
def _date_list(gid_list):
unixtime_list = ibs.get_image_unixtime(gid_list)
datetime_list = [
ut.unixtime_to_datetimestr(unixtime) if unixtime is not None else 'UNKNOWN'
for unixtime in unixtime_list
]
datetime_split_list = [datetime.split(' ') for datetime in datetime_list]
date_list = [
datetime_split[0] if len(datetime_split) == 2 else 'UNKNOWN'
for datetime_split in datetime_split_list
]
return date_list
def filter_annots_imageset(aid_list):
if not aid_list: # no need to filter if empty
return aid_list
try:
imgsetid = request.args.get('imgsetid', '')
imgsetid = int(imgsetid)
imgsetid_list = ibs.get_valid_imgsetids()
assert imgsetid in imgsetid_list
except Exception:
logger.info('ERROR PARSING IMAGESET ID FOR ANNOTATION FILTERING')
return aid_list
imgsetids_list = ibs.get_annot_imgsetids(aid_list)
aid_list = [
aid
for aid, imgsetid_list_ in list(zip(aid_list, imgsetids_list))
if imgsetid in imgsetid_list_
]
return aid_list
def filter_annots_general(ibs, aid_list):
if not aid_list: # no need to filter if empty
return aid_list
if ibs.dbname == 'GGR-IBEIS':
# Grevy's
filter_kw = {
'multiple': None,
'minqual': 'good',
'is_known': True,
'min_pername': 1,
'species': 'zebra_grevys',
'view': ['right'],
}
aid_list1 = ibs.filter_annots_general(aid_list, filter_kw=filter_kw)
# aid_list1 = []
# Plains
filter_kw = {
'multiple': None,
'minqual': 'ok',
'is_known': True,
'min_pername': 1,
'species': 'zebra_plains',
'view': ['left'],
}
aid_list2 = ibs.filter_annots_general(aid_list, filter_kw=filter_kw)
aid_list2 = []
# Masai
filter_kw = {
'multiple': None,
'minqual': 'ok',
'is_known': True,
'min_pername': 1,
'species': 'giraffe_masai',
'view': ['left'],
}
aid_list3 = ibs.filter_annots_general(aid_list, filter_kw=filter_kw)
aid_list3 = []
aid_list = aid_list1 + aid_list2 + aid_list3
# aid_list = ibs.filter_annots_general(aid_list, filter_kw=filter_kw)
else:
assert ibs.dbname == 'GGR2-IBEIS'
aid_list = ibs.check_ggr_valid_aids(
aid_list, species='zebra_grevys', threshold=0.75
)
return aid_list
ibs = current_app.ibs
aid_list = ibs.get_valid_aids()
aid_list = filter_annots_general(ibs, aid_list)
# aid_list = filter_annots_imageset(aid_list)
species_list = ibs.get_annot_species_texts(aid_list)
gid_list = ibs.get_annot_gids(aid_list)
unixtime_list = ibs.get_image_unixtime(gid_list)
nid_list = ibs.get_annot_name_rowids(aid_list)
date_list = _date_list(gid_list)
flagged_date_list = ['2015/03/01', '2015/03/02', '2016/01/30', '2016/01/31']
index = 0
value = 0
line_index_list = []
line_label_list = []
line_value_list = []
seen_set = set()
last_date = None
for unixtime, aid, nid, date, species in sorted(
list(zip(unixtime_list, aid_list, nid_list, date_list, species_list))
):
# if flagged_date_list is not None and date not in flagged_date_list:
# continue
index += 1
line_index_list.append(index)
# Add to counters
if nid not in seen_set:
seen_set.add(nid)
value += 1
# Add to register
line_value_list.append(value)
# Label the x-axis every 1000 annotations (per-day labeling below is disabled)
if index % 1000 == 0:
line_label_list.append(index)
else:
line_label_list.append('')
# if date != last_date and date != 'UNKNOWN':
# last_date = date
# # line_label_list.append(date)
# line_label_list.append('')
# else:
# line_label_list.append('')
# Cumulative count of unique names over the sorted annotations, for the line chart
embedded = dict(globals(), **locals())
return appf.template('view', 'advanced2', __wrapper_header__=False, **embedded)
@register_route('/view/advanced/3/', methods=['GET'])
def view_advanced3(**kwargs):
ibs = current_app.ibs
gid_list = ibs.get_valid_gids()
# gid_list = gid_list[:100] + gid_list[-100:]
note_list = ibs.get_image_notes(gid_list)
contrib_dict = {}
skipped = 0
for gid, note in list(zip(gid_list, note_list)):
note = note.strip()
if len(note) == 0:
skipped += 1
continue
dataset_tag = 'GGR' if 'GGR' in note else 'GZGC'
if dataset_tag not in contrib_dict:
contrib_dict[dataset_tag] = {}
if dataset_tag == 'GGR':
note_ = note.strip().split(',')
car, letter = note_[1:3]
else:
note_ = note.strip().split(',')
car, letter = note_[0:2]
car = car.split(' ')[-1].strip("'")
letter = letter.split(' ')[-1].strip("'")
if car not in contrib_dict[dataset_tag]:
contrib_dict[dataset_tag][car] = {}
if letter not in contrib_dict[dataset_tag][car]:
contrib_dict[dataset_tag][car][letter] = 0
contrib_dict[dataset_tag][car][letter] += 1
max_size = 0
for dataset_tag in contrib_dict:
temp_list = []
for car in contrib_dict[dataset_tag]:
letter_dict = contrib_dict[dataset_tag][car]
combined_list = list([(_[1], _[0]) for _ in letter_dict.items()])
combined_list = sorted(combined_list, reverse=True)
letter_list = [_[1] for _ in combined_list]
total = sum(letter_dict.values())
temp_list.append((total, car, letter_list))
temp_list = sorted(temp_list, reverse=True)
max_size = max(max_size, len(temp_list))
contrib_dict[dataset_tag]['__MANIFEST__'] = temp_list
max_show = 30
# bar_label_list = [''] * max_show
bar_label_list = list(range(1, max_show + 1))
bar_values_dict = {}
for dataset_tag in contrib_dict:
values = [_[0] for _ in contrib_dict[dataset_tag]['__MANIFEST__']]
padding = max_size - len(values)
values = values + [0] * padding
values = values[:max_show]
bar_values_dict[dataset_tag] = values
for dataset_tag in contrib_dict:
logger.info(dataset_tag)
total_cars = 0
total_letters = 0
total_images = 0
for car in contrib_dict[dataset_tag]:
if car == '__MANIFEST__':
continue
total_cars += 1
for letter in contrib_dict[dataset_tag][car]:
total_letters += 1
total_images += contrib_dict[dataset_tag][car][letter]
logger.info(total_cars)
logger.info(total_letters)
logger.info(total_images)
logger.info(skipped)
# Per-car and per-letter contribution counts for each dataset, for the bar chart
embedded = dict(globals(), **locals())
return appf.template('view', 'advanced3', __wrapper_header__=False, **embedded)
@register_route('/view/advanced/4/', methods=['GET'])
def view_advanced4(**kwargs):
def _date_list(gid_list):
unixtime_list = ibs.get_image_unixtime(gid_list)
datetime_list = [
ut.unixtime_to_datetimestr(unixtime) if unixtime is not None else 'UNKNOWN'
for unixtime in unixtime_list
]
datetime_split_list = [datetime.split(' ') for datetime in datetime_list]
date_list = [
datetime_split[0] if len(datetime_split) == 2 else 'UNKNOWN'
for datetime_split in datetime_split_list
]
return date_list
def filter_species_of_interest(gid_list):
if not gid_list: # no need to filter if empty
return gid_list
wanted_set = set(['zebra_plains', 'zebra_grevys', 'giraffe_masai'])
aids_list = ibs.get_image_aids(gid_list)
speciess_list = ut.unflat_map(ibs.get_annot_species_texts, aids_list)
speciess_set = map(set, speciess_list)
gid_list_filtered = []
for gid, species_set in list(zip(gid_list, speciess_set)):
intersect_list = list(wanted_set & species_set)
if len(intersect_list) > 0:
gid_list_filtered.append(gid)
return gid_list_filtered
def filter_viewpoints_of_interest(gid_list, allowed_viewpoint_list):
if not gid_list: # no need to filter if empty
return gid_list
aids_list = ibs.get_image_aids(gid_list)
wanted_set = set(allowed_viewpoint_list)
viewpoints_list = ut.unflat_map(ibs.get_annot_viewpoints, aids_list)
viewpoints_list = map(set, viewpoints_list)
gid_list_filtered = []
for gid, viewpoint_set in list(zip(gid_list, viewpoints_list)):
intersect_list = list(wanted_set & viewpoint_set)
if len(intersect_list) > 0:
gid_list_filtered.append(gid)
return gid_list_filtered
def filter_bad_metadata(gid_list):
if not gid_list: # no need to filter if empty
return gid_list
wanted_set = set(['2015/03/01', '2015/03/02', '2016/01/30', '2016/01/31'])
date_list = _date_list(gid_list)
gps_list = ibs.get_image_gps(gid_list)
gid_list_filtered = []
for gid, date, gps in list(zip(gid_list, date_list, gps_list)):
if date in wanted_set and gps != (-1.0, -1.0):
gid_list_filtered.append(gid)
return gid_list_filtered
def filter_bad_quality(gid_list, allowed_quality_list):
if not gid_list: # no need to filter if empty
return gid_list
aids_list = ibs.get_image_aids(gid_list)
wanted_set = set(allowed_quality_list)
qualities_list = ut.unflat_map(ibs.get_annot_quality_texts, aids_list)
qualities_list = map(set, qualities_list)
gid_list_filtered = []
for gid, quality_list in list(zip(gid_list, qualities_list)):
intersect_list = list(wanted_set & quality_list)
if len(intersect_list) > 0:
gid_list_filtered.append(gid)
return gid_list_filtered
# def filter_singletons(gid_list):
# aids_list = ibs.get_image_aids(gid_list)
# nids_list = ut.unflat_map(ibs.get_annot_nids, aids_list)
# gid_list_filtered = []
# for gid, nid_list in list(zip(gid_list, nids_list)):
# logger.info(gid)
# logger.info(nid_list)
# aids_list_ = ibs.get_name_aids(nid_list)
# logger.info(aids_list_)
# single = True
# for nid, aid_list in list(zip(nid_list, aids_list_)):
# if nid == const.UNKNOWN_NAME_ROWID or nid < 0:
# continue
# if len(aid_list) > 1:
# | |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# This file is auto-generated by h2o-3/h2o-bindings/bin/gen_python.py
# Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
#
from __future__ import absolute_import, division, print_function, unicode_literals
from h2o.estimators.estimator_base import H2OEstimator
from h2o.exceptions import H2OValueError
from h2o.frame import H2OFrame
from h2o.utils.typechecks import assert_is_type, Enum, numeric
class H2ONaiveBayesEstimator(H2OEstimator):
"""
Naive Bayes
The naive Bayes classifier assumes independence between predictor variables
conditional on the response, and a Gaussian distribution of numeric predictors with
mean and standard deviation computed from the training dataset. When building a naive
Bayes classifier, every row in the training dataset that contains at least one NA will
be skipped completely. If the test dataset has missing values, then those predictors
are omitted in the probability calculation during prediction.
"""
algo = "naivebayes"
supervised_learning = True
def __init__(self,
model_id=None, # type: Optional[Union[None, str, H2OEstimator]]
nfolds=0, # type: int
seed=-1, # type: int
fold_assignment="auto", # type: Literal["auto", "random", "modulo", "stratified"]
fold_column=None, # type: Optional[str]
keep_cross_validation_models=True, # type: bool
keep_cross_validation_predictions=False, # type: bool
keep_cross_validation_fold_assignment=False, # type: bool
training_frame=None, # type: Optional[Union[None, str, H2OFrame]]
validation_frame=None, # type: Optional[Union[None, str, H2OFrame]]
response_column=None, # type: Optional[str]
ignored_columns=None, # type: Optional[List[str]]
ignore_const_cols=True, # type: bool
score_each_iteration=False, # type: bool
balance_classes=False, # type: bool
class_sampling_factors=None, # type: Optional[List[float]]
max_after_balance_size=5.0, # type: float
max_confusion_matrix_size=20, # type: int
laplace=0.0, # type: float
min_sdev=0.001, # type: float
eps_sdev=0.0, # type: float
min_prob=0.001, # type: float
eps_prob=0.0, # type: float
compute_metrics=True, # type: bool
max_runtime_secs=0.0, # type: float
export_checkpoints_dir=None, # type: Optional[str]
gainslift_bins=-1, # type: int
auc_type="auto", # type: Literal["auto", "none", "macro_ovr", "weighted_ovr", "macro_ovo", "weighted_ovo"]
):
"""
:param model_id: Destination id for this model; auto-generated if not specified.
Defaults to ``None``.
:type model_id: Union[None, str, H2OEstimator], optional
:param nfolds: Number of folds for K-fold cross-validation (0 to disable or >= 2).
Defaults to ``0``.
:type nfolds: int
:param seed: Seed for pseudo random number generator (only used for cross-validation and
fold_assignment="Random" or "AUTO")
Defaults to ``-1``.
:type seed: int
:param fold_assignment: Cross-validation fold assignment scheme, if fold_column is not specified. The
'Stratified' option will stratify the folds based on the response variable, for classification problems.
Defaults to ``"auto"``.
:type fold_assignment: Literal["auto", "random", "modulo", "stratified"]
:param fold_column: Column with cross-validation fold index assignment per observation.
Defaults to ``None``.
:type fold_column: str, optional
:param keep_cross_validation_models: Whether to keep the cross-validation models.
Defaults to ``True``.
:type keep_cross_validation_models: bool
:param keep_cross_validation_predictions: Whether to keep the predictions of the cross-validation models.
Defaults to ``False``.
:type keep_cross_validation_predictions: bool
:param keep_cross_validation_fold_assignment: Whether to keep the cross-validation fold assignment.
Defaults to ``False``.
:type keep_cross_validation_fold_assignment: bool
:param training_frame: Id of the training data frame.
Defaults to ``None``.
:type training_frame: Union[None, str, H2OFrame], optional
:param validation_frame: Id of the validation data frame.
Defaults to ``None``.
:type validation_frame: Union[None, str, H2OFrame], optional
:param response_column: Response variable column.
Defaults to ``None``.
:type response_column: str, optional
:param ignored_columns: Names of columns to ignore for training.
Defaults to ``None``.
:type ignored_columns: List[str], optional
:param ignore_const_cols: Ignore constant columns.
Defaults to ``True``.
:type ignore_const_cols: bool
:param score_each_iteration: Whether to score during each iteration of model training.
Defaults to ``False``.
:type score_each_iteration: bool
:param balance_classes: Balance training data class counts via over/under-sampling (for imbalanced data).
Defaults to ``False``.
:type balance_classes: bool
:param class_sampling_factors: Desired over/under-sampling ratios per class (in lexicographic order). If not
specified, sampling factors will be automatically computed to obtain class balance during training.
Requires balance_classes.
Defaults to ``None``.
:type class_sampling_factors: List[float], optional
:param max_after_balance_size: Maximum relative size of the training data after balancing class counts (can be
less than 1.0). Requires balance_classes.
Defaults to ``5.0``.
:type max_after_balance_size: float
:param max_confusion_matrix_size: [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
the Logs
Defaults to ``20``.
:type max_confusion_matrix_size: int
:param laplace: Laplace smoothing parameter
Defaults to ``0.0``.
:type laplace: float
:param min_sdev: Min. standard deviation to use for observations with not enough data
Defaults to ``0.001``.
:type min_sdev: float
:param eps_sdev: Cutoff below which standard deviation is replaced with min_sdev
Defaults to ``0.0``.
:type eps_sdev: float
:param min_prob: Min. probability to use for observations with not enough data
Defaults to ``0.001``.
:type min_prob: float
:param eps_prob: Cutoff below which probability is replaced with min_prob
Defaults to ``0.0``.
:type eps_prob: float
:param compute_metrics: Compute metrics on training data
Defaults to ``True``.
:type compute_metrics: bool
:param max_runtime_secs: Maximum allowed runtime in seconds for model training. Use 0 to disable.
Defaults to ``0.0``.
:type max_runtime_secs: float
:param export_checkpoints_dir: Automatically export generated models to this directory.
Defaults to ``None``.
:type export_checkpoints_dir: str, optional
:param gainslift_bins: Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
binning.
Defaults to ``-1``.
:type gainslift_bins: int
:param auc_type: Set default multinomial AUC type.
Defaults to ``"auto"``.
:type auc_type: Literal["auto", "none", "macro_ovr", "weighted_ovr", "macro_ovo", "weighted_ovo"]
"""
super(H2ONaiveBayesEstimator, self).__init__()
self._parms = {}
self._id = self._parms['model_id'] = model_id
self.nfolds = nfolds
self.seed = seed
self.fold_assignment = fold_assignment
self.fold_column = fold_column
self.keep_cross_validation_models = keep_cross_validation_models
self.keep_cross_validation_predictions = keep_cross_validation_predictions
self.keep_cross_validation_fold_assignment = keep_cross_validation_fold_assignment
self.training_frame = training_frame
self.validation_frame = validation_frame
self.response_column = response_column
self.ignored_columns = ignored_columns
self.ignore_const_cols = ignore_const_cols
self.score_each_iteration = score_each_iteration
self.balance_classes = balance_classes
self.class_sampling_factors = class_sampling_factors
self.max_after_balance_size = max_after_balance_size
self.max_confusion_matrix_size = max_confusion_matrix_size
self.laplace = laplace
self.min_sdev = min_sdev
self.eps_sdev = eps_sdev
self.min_prob = min_prob
self.eps_prob = eps_prob
self.compute_metrics = compute_metrics
self.max_runtime_secs = max_runtime_secs
self.export_checkpoints_dir = export_checkpoints_dir
self.gainslift_bins = gainslift_bins
self.auc_type = auc_type
@property
def nfolds(self):
"""
Number of folds for K-fold cross-validation (0 to disable or >= 2).
Type: ``int``, defaults to ``0``.
:examples:
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "economy_20mpg"
>>> cars_nb = H2ONaiveBayesEstimator(nfolds=5,
... seed=1234)
>>> cars_nb.train(x=predictors,
... y=response,
... training_frame=cars)
>>> cars_nb.auc()
"""
return self._parms.get("nfolds")
@nfolds.setter
def nfolds(self, nfolds):
assert_is_type(nfolds, None, int)
self._parms["nfolds"] = nfolds
@property
def seed(self):
"""
Seed for pseudo random number generator (only used for cross-validation and fold_assignment="Random" or "AUTO")
Type: ``int``, defaults to ``-1``.
:examples:
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
>>> airlines["Year"] = airlines["Year"].asfactor()
>>> airlines["Month"] = airlines["Month"].asfactor()
>>> airlines["DayOfWeek"] = airlines["DayOfWeek"].asfactor()
>>> airlines["Cancelled"] = airlines["Cancelled"].asfactor()
>>> airlines['FlightNum'] = airlines['FlightNum'].asfactor()
>>> predictors = ["Origin", "Dest", "Year", "UniqueCarrier",
... "DayOfWeek", "Month", "Distance", "FlightNum"]
>>> response = "IsDepDelayed"
>>> train, valid= airlines.split_frame(ratios=[.8], seed=1234)
>>> nb_w_seed = H2ONaiveBayesEstimator(seed=1234)
>>> nb_w_seed.train(x=predictors,
... y=response,
... training_frame=train,
... validation_frame=valid)
>>> nb_wo_seed = H2ONaiveBayesEstimator()
>>> nb_wo_seed.train(x=predictors,
... y=response,
... training_frame=train,
... validation_frame=valid)
>>> nb_w_seed.auc()
>>> nb_wo_seed.auc()
"""
return self._parms.get("seed")
@seed.setter
def seed(self, seed):
assert_is_type(seed, None, int)
self._parms["seed"] = seed
@property
def fold_assignment(self):
"""
Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified' option will stratify
the folds based on the response variable, for classification problems.
Type: ``Literal["auto", "random", "modulo", "stratified"]``, defaults to ``"auto"``.
:examples:
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "cylinders"
>>> cars_nb = H2ONaiveBayesEstimator(fold_assignment="Random",
... nfolds=5,
... seed=1234)
>>> response = "economy_20mpg"
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> cars_nb.train(x=predictors, y=response, training_frame=cars)
>>> cars_nb.auc()
"""
return self._parms.get("fold_assignment")
@fold_assignment.setter
def fold_assignment(self, fold_assignment):
assert_is_type(fold_assignment, None, Enum("auto", "random", "modulo", "stratified"))
self._parms["fold_assignment"] = fold_assignment
@property
def fold_column(self):
"""
Column with cross-validation fold index assignment per observation.
Type: ``str``.
:examples:
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "economy_20mpg"
>>> fold_numbers = cars.kfold_column(n_folds=5, seed=1234)
>>> fold_numbers.set_names(["fold_numbers"])
>>> cars = cars.cbind(fold_numbers)
>>> cars_nb = H2ONaiveBayesEstimator(seed=1234)
>>> cars_nb.train(x=predictors,
... y=response,
... training_frame=cars,
... fold_column="fold_numbers")
>>> cars_nb.auc()
"""
return self._parms.get("fold_column")
@fold_column.setter
def fold_column(self, fold_column):
assert_is_type(fold_column, None, str)
self._parms["fold_column"] = fold_column
@property
def keep_cross_validation_models(self):
"""
Whether to keep the cross-validation models.
Type: ``bool``, defaults to ``True``.
:examples:
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "economy_20mpg"
>>> train, valid = cars.split_frame(ratios=[.8], seed=1234)
>>> cars_nb = H2ONaiveBayesEstimator(keep_cross_validation_models=True,
... nfolds=5,
... seed=1234)
>>> cars_nb.train(x=predictors,
... y=response,
... training_frame=train)
>>> cars_nb.cross_validation_models()
"""
return self._parms.get("keep_cross_validation_models")
@keep_cross_validation_models.setter
def keep_cross_validation_models(self, keep_cross_validation_models):
assert_is_type(keep_cross_validation_models, None, bool)
self._parms["keep_cross_validation_models"] = keep_cross_validation_models
@property
def keep_cross_validation_predictions(self):
"""
Whether to keep the predictions of the cross-validation models.
Type: ``bool``, defaults to | |
* 9 * 10, dtype=np.uint8).reshape((5, 7, 9, 10))
dataXCorrect, dataYCorrect, patchIndicesCorrect = \
reference_extract_patched_all_data_without_empty_4d(data, (3, 4, 3, 5), (1, 1, 2, 1), 15)
dataXActual, dataYActual, patchIndicesActual = \
patching_tools.extract_patched_all_data_without_empty_4d(data, (3, 4, 3, 5), (1, 1, 2, 1), 15,
batchSize=100)
self.assertEqual(dataXCorrect.shape, dataXActual.shape)
self.assertEqual(dataYCorrect.shape, dataYActual.shape)
self.assertEqual(patchIndicesCorrect.shape, patchIndicesActual.shape)
self.assertTrue(np.all(np.equal(dataXCorrect, dataXActual)))
self.assertTrue(np.all(np.equal(dataYCorrect, dataYActual)))
self.assertTrue(np.all(np.equal(patchIndicesCorrect, patchIndicesActual)))
# ------------------------------------------------------
# Stride of (1, 1, 1, 1).
data = np.arange(0, 5 * 7 * 9 * 10, dtype=np.uint8).reshape((5, 7, 9, 10))
dataXCorrect, dataYCorrect, patchIndicesCorrect = \
reference_extract_patched_all_data_without_empty_4d(data, (3, 4, 3, 5), (1, 1, 1, 1), 15)
dataXActual, dataYActual, patchIndicesActual = \
patching_tools.extract_patched_all_data_without_empty_4d(data, (3, 4, 3, 5), (1, 1, 1, 1), 15,
batchSize=100)
self.assertEqual(dataXCorrect.shape, dataXActual.shape)
self.assertEqual(dataYCorrect.shape, dataYActual.shape)
self.assertEqual(patchIndicesCorrect.shape, patchIndicesActual.shape)
self.assertTrue(np.all(np.equal(dataXCorrect, dataXActual)))
self.assertTrue(np.all(np.equal(dataYCorrect, dataYActual)))
self.assertTrue(np.all(np.equal(patchIndicesCorrect, patchIndicesActual)))
# ------------------------------------------------------
# The same thing, but with some empty patches
data = np.arange(0, 5 * 7 * 9 * 10, dtype=np.uint8).reshape((5, 7, 9, 10))
data[0:4, 0:6, 0:8, 1:9] = 15
dataXCorrect, dataYCorrect, patchIndicesCorrect = \
reference_extract_patched_all_data_without_empty_4d(data, (3, 4, 3, 5), (1, 1, 2, 1), 15)
dataXActual, dataYActual, patchIndicesActual = \
patching_tools.extract_patched_all_data_without_empty_4d(data, (3, 4, 3, 5), (1, 1, 2, 1), 15,
batchSize=100)
self.assertEqual(dataXCorrect.shape, dataXActual.shape)
self.assertEqual(dataYCorrect.shape, dataYActual.shape)
self.assertEqual(patchIndicesCorrect.shape, patchIndicesActual.shape)
self.assertTrue(np.all(np.equal(dataXCorrect, dataXActual)))
self.assertTrue(np.all(np.equal(dataYCorrect, dataYActual)))
self.assertTrue(np.all(np.equal(patchIndicesCorrect, patchIndicesActual)))
def test_shuffle_hdf_arrays_together(self):
shapeX = (1000, 25, 25)
shapeY = (1000, 25, 1)
tempDir = tempfile.mkdtemp()
file = h5py.File(os.path.join(tempDir, 'temp.h5py'), 'w')
dataX = file.create_dataset('tempX', shapeX, np.float32)
dataX[...] = np.random.uniform(0, 1000, shapeX)
dataY = file.create_dataset('tempY', shapeY, np.float32)
for i in range(0, shapeX[0]):
for j in range(0, shapeX[1]):
simpleHash = np.sum(dataX[i, j, :].astype(np.int32)) % 73
dataY[i, j] = simpleHash
firstColumnBefore = dataX[:, 0, 0]
dataYBefore = dataY[...].copy()
timeBefore = time.time()
npe.shuffle_hdf_arrays_together(dataX, dataY, blockSize=13)
print("Shuffled in {:.2f} s.".format(time.time() - timeBefore))
# Check that order has changed.
self.assertFalse(np.all(np.equal(firstColumnBefore, dataX[:, 0, 0])),
msg='If we are extremely unlucky, the order might not change')
# Check that the arrays are still in sync.
for i in range(0, shapeX[0]):
for j in range(0, shapeX[1]):
simpleHash = np.sum(dataX[i, j, :].astype(np.int32)) % 73
self.assertEqual(dataY[i, j], simpleHash)
# Check that arrays have the same content.
self.assertTrue(np.all(np.equal(np.sort(dataYBefore.flatten()),
np.sort(dataY[...].flatten()))))
def test_abs_diff_hdf_arrays(self):
shape = (10000, 25, 25)
tempDir = tempfile.mkdtemp()
file = h5py.File(os.path.join(tempDir, 'temp.h5py'), 'w')
dataA = file.create_dataset('tempA', shape, np.uint8)
dataB = file.create_dataset('tempB', shape, np.uint8)
out = file.create_dataset('out', shape, np.float32)
dataA[...] = np.random.uniform(0, 255, shape)
dataB[...] = np.random.uniform(0, 255, shape)
out[...] = np.random.uniform(0, 255, shape)
npe.abs_diff_hdf_arrays(dataA, dataB, out, np.float32, batchSizeFlat=3119)
trueDiff = np.abs(dataA[...].astype(np.float32) - dataB[...].astype(np.float32))
self.assertTrue(np.all(np.equal(out, trueDiff)))
def test_abs_diff_hdf_arrays_masked(self):
shape = (10000, 25, 25)
tempDir = tempfile.mkdtemp()
file = h5py.File(os.path.join(tempDir, 'temp.h5py'), 'w')
dataA = file.create_dataset('tempA', shape, np.uint8)
dataB = file.create_dataset('tempB', shape, np.uint8)
mask = file.create_dataset('mask', shape, np.bool_)
out = file.create_dataset('out', shape, np.float32)
dataA[...] = np.random.uniform(0, 255, shape)
dataB[...] = np.random.uniform(0, 255, shape)
mask[...] = False
mask[:5000] = True
out[...] = np.random.uniform(0, 255, shape)
npe.abs_diff_hdf_arrays_masked(dataA, dataB, mask, out, np.float32, batchSizeFlat=3119)
trueDiff = np.abs(dataA[...].astype(np.float32) - dataB[...].astype(np.float32))
trueDiff[np.logical_not(mask)] = 0
self.assertTrue(np.all(np.equal(out, trueDiff)))
def test_mse_large_arrays_with_hdf(self):
shape = (10000, 25, 25)
tempDir = tempfile.mkdtemp()
file = h5py.File(os.path.join(tempDir, 'temp.h5py'), 'w')
dataA = file.create_dataset('tempA', shape, np.uint8)
dataB = file.create_dataset('tempB', shape, np.uint8)
dataA[...] = np.random.uniform(0, 255, shape)
dataB[...] = np.random.uniform(0, 255, shape)
# Test typical conditions.
mse = npe.mse_large_arrays(dataA, dataB, np.float64, batchSizeFlat=7119)
trueMse = np.mean(np.square(dataA[...].astype(np.float64) - dataB[...].astype(np.float64)), dtype=np.float64)
self.assertAlmostEqual(mse, trueMse)
# Test batches with an imbalanced number of elements.
mse = npe.mse_large_arrays(dataA, dataB, np.float64, batchSizeFlat=8000 * 25 * 25 * 4)
trueMse = np.mean(np.square(dataA[...].astype(np.float64) - dataB[...].astype(np.float64)), dtype=np.float64)
self.assertAlmostEqual(mse, trueMse)
def test_mse_large_arrays_with_bna(self):
shape = (10000, 25, 25)
dataA = BufferedNdArray(tempfile.mktemp(), BufferedNdArray.FileMode.rewrite, shape, np.uint8, int(1e8))
dataB = BufferedNdArray(tempfile.mktemp(), BufferedNdArray.FileMode.rewrite, shape, np.uint8, int(1e8))
dataA[...] = np.random.uniform(0, 255, shape).astype(np.uint8)
dataB[...] = np.random.uniform(0, 255, shape).astype(np.uint8)
# Test typical conditions.
mse = npe.mse_large_arrays(dataA, dataB, np.float64, batchSizeFlat=7119)
trueMse = np.mean(np.square(dataA[...].astype(np.float64) - dataB[...].astype(np.float64)), dtype=np.float64)
self.assertAlmostEqual(mse, trueMse)
# Test batches with an imbalanced number of elements.
mse = npe.mse_large_arrays(dataA, dataB, np.float64, batchSizeFlat=8000 * 25 * 25 * 4)
trueMse = np.mean(np.square(dataA[...].astype(np.float64) - dataB[...].astype(np.float64)), dtype=np.float64)
self.assertAlmostEqual(mse, trueMse)
def test_large_arrays_masked_with_hdf(self):
shape = (10000, 25, 25)
tempDir = tempfile.mkdtemp()
file = h5py.File(os.path.join(tempDir, 'temp.h5py'), 'w')
dataA = file.create_dataset('tempA', shape, np.uint8)
dataB = file.create_dataset('tempB', shape, np.uint8)
mask = file.create_dataset('mask', shape, np.bool_)
dataA[...] = np.random.uniform(0, 255, shape)
dataB[...] = np.random.uniform(0, 255, shape)
mask[...] = False
mask[:5000] = True
# Test typical conditions.
mse = npe.mse_large_arrays_masked(dataA, dataB, mask, np.float64, batchSizeFlat=27119)
trueMse = np.mean(np.square(dataA[:5000].astype(np.float64) - dataB[:5000].astype(np.float64)),
dtype=np.float64)
self.assertAlmostEqual(mse, trueMse)
# Test batches with an imbalanced number of elements.
mse = npe.mse_large_arrays_masked(dataA, dataB, mask, np.float64, batchSizeFlat=8000 * 25 * 25 * 4)
trueMse = np.mean(np.square(dataA[:5000].astype(np.float64) - dataB[:5000].astype(np.float64)),
dtype=np.float64)
self.assertAlmostEqual(mse, trueMse)
def test_large_arrays_masked_with_bna(self):
shape = (10000, 25, 25)
dataA = BufferedNdArray(tempfile.mktemp(), BufferedNdArray.FileMode.rewrite, shape, np.uint8, int(1e8))
dataB = BufferedNdArray(tempfile.mktemp(), BufferedNdArray.FileMode.rewrite, shape, np.uint8, int(1e8))
mask = BufferedNdArray(tempfile.mktemp(), BufferedNdArray.FileMode.rewrite, shape, np.uint8, int(1e8))
dataA[...] = np.random.uniform(0, 255, shape).astype(np.uint8)
dataB[...] = np.random.uniform(0, 255, shape).astype(np.uint8)
mask[...] = np.zeros(shape, dtype=np.uint8)
for i in range(5000):
mask[i] = np.ones(shape[1:], dtype=np.uint8)
# Test typical conditions.
mse = npe.mse_large_arrays_masked(dataA, dataB, mask, np.float64, batchSizeFlat=27119)
trueMse = np.mean(np.square(dataA[:5000].astype(np.float64) - dataB[:5000].astype(np.float64)),
dtype=np.float64)
self.assertAlmostEqual(mse, trueMse)
# Test batches with an imbalanced number of elements.
mse = npe.mse_large_arrays_masked(dataA, dataB, mask, np.float64, batchSizeFlat=8000 * 25 * 25 * 4)
trueMse = np.mean(np.square(dataA[:5000].astype(np.float64) - dataB[:5000].astype(np.float64)),
dtype=np.float64)
self.assertAlmostEqual(mse, trueMse)
def test_var_large_array(self):
shape = (10000, 25, 25)
tempDir = tempfile.mkdtemp()
file = h5py.File(os.path.join(tempDir, 'temp.h5py'), 'w')
data = file.create_dataset('tempA', shape, np.uint8)
data[...] = np.random.uniform(0, 255, shape)
# Test typical conditions.
var = npe.var_large_array(data, np.float64, batchSizeFlat=7119)
trueVar = np.mean(np.square(data[...].astype(np.float64)), dtype=np.float64) - \
np.mean(data[...].astype(np.float64), dtype=np.float64) ** 2
self.assertAlmostEqual(var, trueVar)
# Test batches with an imbalanced number of elements.
var = npe.var_large_array(data, np.float64, batchSizeFlat=8000 * 25 * 25 * 4)
trueVar = np.mean(np.square(data[...].astype(np.float64)), dtype=np.float64) - \
np.mean(data[...].astype(np.float64), dtype=np.float64) ** 2
self.assertAlmostEqual(var, trueVar)
data = file.create_dataset('tempB', (10, 1), np.uint8)
data[...] = np.asarray([0, 0, 0, 0, 0, 10, 10, 10, 10, 10]).reshape(10, 1)
var = npe.var_large_array(data, np.float64, batchSizeFlat=3119)
self.assertAlmostEqual(var, 25.0)
def test_var_large_array_masked(self):
shape = (10000, 25, 15)
tempDir = tempfile.mkdtemp()
file = h5py.File(os.path.join(tempDir, 'temp.h5py'), 'w')
data = file.create_dataset('tempA', shape, np.uint8)
mask = file.create_dataset('maskA', shape, np.bool_)
data[...] = np.random.uniform(0, 255, shape)
mask[...] = False
mask[:5000] = True
# Test typical conditions.
var = npe.var_large_array_masked(data, mask, np.float64, batchSizeFlat=7119)
trueVar = np.mean(np.square(data[:5000].astype(np.float64)), dtype=np.float64) - \
np.mean(data[:5000].astype(np.float64), dtype=np.float64) ** 2
numpyVar = np.var(data[:5000], dtype=np.float64)
self.assertAlmostEqual(var, trueVar)
self.assertAlmostEqual(var, numpyVar)
# Test batches with an imbalanced number of elements.
# The first batch has 4000 nonzero slices, the second - only 1000.
var = npe.var_large_array_masked(data, mask, np.float64, batchSizeFlat=4000 * 25 * 15 * 4)
trueVar = np.mean(np.square(data[:5000].astype(np.float64)), dtype=np.float64) - \
np.mean(data[:5000].astype(np.float64), dtype=np.float64) ** 2
self.assertAlmostEqual(var, trueVar)
# Test a small easy to understand case.
data = file.create_dataset('tempB', (10, 1), np.uint8)
mask = file.create_dataset('maskB', (10, 1), np.bool_)
data[...] = np.asarray([0, 0, 0, 0, 66, 66, 10, 10, 10, 10]).reshape(10, 1)
mask[...] = True
mask[4:6] = False
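# With the 66s masked out, the remaining values are four 0s and four 10s,
# so the mean is 5 and the variance is 25.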
var = npe.var_large_array_masked(data, mask, np.float64, batchSizeFlat=3119)
self.assertAlmostEqual(var, 25.0)
def test_get_batch_indices(self):
shape = (1000, 5, 7)
dtype = np.float32
batchSizeFlat = 32000
expectedBatchSize = 228 # 32000 / (5 * 7 * 4)
expectedIndices = [(0, 228), (228, 456), (456, 684), (684, 912), (912, 1000)]
actualIndices = list(npe.get_batch_indices(shape, dtype, batchSizeFlat=batchSizeFlat))
self.assertEqual(expectedIndices, actualIndices)
# Test a very large batch.
actualIndices = list(npe.get_batch_indices(shape, dtype, batchSizeFlat=1e10))
self.assertEqual([(0, 1000)], actualIndices)
# Test a batch that is too small.
with self.assertRaises(RuntimeError):
list(npe.get_batch_indices(shape, dtype, batchSizeFlat=10))
# todo: Test the fixed batch size parameter.
def test_numpy_json_encoder(self):
tempDir = tempfile.gettempdir()
tempPath = os.path.join(tempDir, 'test_json_encoder.json')
for dtype in [np.float32, np.float64, np.int32, np.int64, np.uint8]:
array = np.ones(5, dtype)
value = array[0]
with open(tempPath, 'w') as file: # Overwrite existing.
json.dump({'key': value}, file, cls=npe.JsonEncoder)
with open(tempPath, 'r') as file:
contents = json.load(file)
self.assertEqual(contents['key'], value)
def test_moving_average_nd(self):
data = np.array(
[[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
expected = np.array(
[[ 8, 15, 12],
[21, 36, 27],
[20, 33, 24]]) / 9.0
actual = npe.moving_average_nd(data, kernelSize=3)
self.assertTrue(np.all(np.less(np.abs(actual - expected), 1e-5)))
expected = np.array(
[[0, 1, 3],
[3, 8, 12],
[9, 20, 24]]) / 4.0
actual = npe.moving_average_nd(data, kernelSize=2)
self.assertTrue(np.all(np.less(np.abs(actual - expected), 1e-5)))
def test_sparse_insert_into_bna(self):
tempPath = self._get_temp_filepath('test_sparse_insert_into_bna.raw')
testCaseSize = 2000
shape = (10, 64, 64, 64)
dataSizeFlat = npe.multiply(shape)
np.random.seed(1771)
# Double the size, to test that the provided insert count is respected.
indices = np.random.randint(0, | |
+ "p42881275-p43008824.7z"),
page_ids=range(42881275, 43008825),
darus_id=93586,
sha1="e3d370bd1c9cf81348eee7d97ac567018f2598d9",
size=441514920,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p43008825-p43151391.7z"),
page_ids=range(43008825, 43151392),
darus_id=93589,
sha1="f3e971a091c0ac00760868c1a97a5da9fc4a662e",
size=486506725,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p43151392-p43249708.7z"),
page_ids=range(43151392, 43249709),
darus_id=93590,
sha1="1314f8c1e9dd7dd43ff5a376c73fe562db72e46a",
size=378116861,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p43249709-p43351690.7z"),
page_ids=range(43249709, 43351691),
darus_id=93591,
sha1="a4e45f4473b740aa7195ea78aaaf551316a8e29c",
size=391052823,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p43351691-p43435317.7z"),
page_ids=range(43351691, 43435318),
darus_id=93592,
sha1="c6db7321cc1719aa4224ca7111eec432150da879",
size=325307715,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p43435318-p43535735.7z"),
page_ids=range(43435318, 43535736),
darus_id=93594,
sha1="20c233f8f72b5205c33d6b1e67b3379bf577055f",
size=360728480,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p43535736-p43645696.7z"),
page_ids=range(43535736, 43645697),
darus_id=93595,
sha1="5aa817a0cf5e65526be75b5eda02018e150c0a26",
size=383653850,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p43645697-p43761276.7z"),
page_ids=range(43645697, 43761277),
darus_id=93596,
sha1="3ab25a65d4585cade1e4e717741385666b2aca77",
size=420538647,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p43761277-p43880515.7z"),
page_ids=range(43761277, 43880516),
darus_id=93597,
sha1="bb26f60392d48b6f16add246a17a79cea45e1c7b",
size=437750278,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p43880516-p43980361.7z"),
page_ids=range(43880516, 43980362),
darus_id=93598,
sha1="a7aed0c919b69f4080b180e109e3d89955cebdd5",
size=369500081,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p43980362-p44101941.7z"),
page_ids=range(43980362, 44101942),
darus_id=93608,
sha1="2181425b29fa99abe2ab2059816b84eaa7430e12",
size=421936549,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p44101942-p44219282.7z"),
page_ids=range(44101942, 44219283),
darus_id=93611,
sha1="6ed18a8d6c85f5c490d9b58f8e6c35864845fea6",
size=377243025,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p44219283-p44356783.7z"),
page_ids=range(44219283, 44356784),
darus_id=93613,
sha1="cb7ebb0089483e0f97014716ab7164ca71357948",
size=460077739,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p44356784-p44483765.7z"),
page_ids=range(44356784, 44483766),
darus_id=93614,
sha1="338a5125de25de2e51df1343d49974b409ee9ee0",
size=453194808,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p44483766-p44611606.7z"),
page_ids=range(44483766, 44611607),
darus_id=93615,
sha1="135d4f6e6666b354c6f1950511bcc21187827356",
size=429128673,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p44611607-p44770311.7z"),
page_ids=range(44611607, 44770312),
darus_id=93616,
sha1="ce641a384cdd9a620fb0b80b69cbdb0098b32e6b",
size=520095692,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p44770312-p44943742.7z"),
page_ids=range(44770312, 44943743),
darus_id=93619,
sha1="af076d31f2cad5f158849e69c445edf36a01dd90",
size=588527454,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p44943743-p45122939.7z"),
page_ids=range(44943743, 45122940),
darus_id=93620,
sha1="164af834768893690cab8d0c73e0072b2fee635a",
size=596484297,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p45122940-p45289497.7z"),
page_ids=range(45122940, 45289498),
darus_id=93622,
sha1="d57e3ecc35d99375e075574bb97ae445617d05c9",
size=575109629,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p45289498-p45461169.7z"),
page_ids=range(45289498, 45461170),
darus_id=93623,
sha1="1df044658eed9e069ad440d9bf9675e5477461cf",
size=581072513,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p45461170-p45527040.7z"),
page_ids=range(45461170, 45527041),
darus_id=93625,
sha1="971f88c16560046fe9feaa33121f241df881d071",
size=228340450,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p45527041-p45699840.7z"),
page_ids=range(45527041, 45699841),
darus_id=93628,
sha1="06fae8452160fc91fbe76bf32495035c972d098e",
size=587193725,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p45699841-p45873683.7z"),
page_ids=range(45699841, 45873684),
darus_id=93629,
sha1="c2ed03a912bee4cd16ea8e85aee2e112a11eebe0",
size=586887467,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p45873684-p46059085.7z"),
page_ids=range(45873684, 46059086),
darus_id=93631,
sha1="54afd852cb7789f9b7d5684d12edabbfda9db1df",
size=611648880,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p46059086-p46223247.7z"),
page_ids=range(46059086, 46223248),
darus_id=93632,
sha1="4747eaf3da47c61b7d268f70984c8831e6e5696d",
size=573414077,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p46223248-p46403271.7z"),
page_ids=range(46223248, 46403272),
darus_id=93633,
sha1="7ba584c9756adadaaf193164ec27fa92b9c2b9d0",
size=581138849,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p46403272-p46546103.7z"),
page_ids=range(46403272, 46546104),
darus_id=93635,
sha1="f4ea90c7561d868f937f6f742a8224a03e84093c",
size=470542822,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p46546104-p46649408.7z"),
page_ids=range(46546104, 46649409),
darus_id=93637,
sha1="06c1bb7a263d548955c3d5a2c968bed0d5c35115",
size=271105821,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p46649409-p46747270.7z"),
page_ids=range(46649409, 46747271),
darus_id=93638,
sha1="edec24d7957a647ad554b9199e09b5e36ac417d5",
size=217344923,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p46747271-p46847320.7z"),
page_ids=range(46747271, 46847321),
darus_id=93639,
sha1="976714a0c7e5b83f965539954b61b3b65e5b7ac1",
size=221889645,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p46847321-p47001936.7z"),
page_ids=range(46847321, 47001937),
darus_id=93641,
sha1="3ed7fcf08454a24acebb13cfaad52664ee0fb2fb",
size=483275848,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p47001937-p47133463.7z"),
page_ids=range(47001937, 47133464),
darus_id=93643,
sha1="c939f9f9867085024014c5c124b42e26f3a57b58",
size=460858471,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p47133464-p47263735.7z"),
page_ids=range(47133464, 47263736),
darus_id=93644,
sha1="e550fd5a3a5327261f6bae34bb7f318f2a31c990",
size=455806808,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p47263736-p47390812.7z"),
page_ids=range(47263736, 47390813),
darus_id=93646,
sha1="5ff03e6b4c8d9721cb9789c126acbbf52ee9541a",
size=450457451,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p47390813-p47504961.7z"),
page_ids=range(47390813, 47504962),
darus_id=93647,
sha1="b1b3ccc81f2851441f2d73e9f8299f03a6227339",
size=421409556,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p47504962-p47638419.7z"),
page_ids=range(47504962, 47638420),
darus_id=93648,
sha1="bb9cfa9adf3f30a092501412d7ae5fc620cb74bc",
size=474440958,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p47638420-p47774069.7z"),
page_ids=range(47638420, 47774070),
darus_id=93651,
sha1="f75901e7c547bb66e594f3c68c8d59a6245a4eba",
size=487228892,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p47774070-p47906796.7z"),
page_ids=range(47774070, 47906797),
darus_id=93652,
sha1="8d446153829ffbe02ba5ef497107d84e9930690a",
size=493553082,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p47906797-p48036547.7z"),
page_ids=range(47906797, 48036548),
darus_id=93653,
sha1="bd211e1c6e2f6e94d2fe8cb22b5125df2178cd40",
size=465357844,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p48036548-p48157308.7z"),
page_ids=range(48036548, 48157309),
darus_id=93655,
sha1="c02d0b18716de299040720d052494ca2b3fa5b87",
size=408882559,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p48157309-p48211944.7z"),
page_ids=range(48157309, 48211945),
darus_id=93656,
sha1="878493f5b28d143ff82a5fe07cd4b08a2c443e2e",
size=232546070,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p48211945-p48312321.7z"),
page_ids=range(48211945, 48312322),
darus_id=93657,
sha1="0dc634698fe4189515d0a344d611645e72cc7523",
size=368701886,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p48312322-p48419073.7z"),
page_ids=range(48312322, 48419074),
darus_id=93658,
sha1="220ad5ffb97d9d1d8ea6faa6a8ce01de651fee9d",
size=390268978,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p48419074-p48529609.7z"),
page_ids=range(48419074, 48529610),
darus_id=93659,
sha1="6e5e3fb80b4825a092ce2a6f9d19201c3f174713",
size=373894405,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p48529610-p48641824.7z"),
page_ids=range(48529610, 48641825),
darus_id=93661,
sha1="49d33a48e0e25d1631a29605477414c0d64aa444",
size=369807963,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p48641825-p48744727.7z"),
page_ids=range(48641825, 48744728),
darus_id=93662,
sha1="40ca63b366afcd794eff96e2115ea71157ff935d",
size=391330767,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p48744728-p48855563.7z"),
page_ids=range(48744728, 48855564),
darus_id=93663,
sha1="ab037184b1bd914464f3a72aa6ee8f1369f77691",
size=415825408,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p48855564-p48971557.7z"),
page_ids=range(48855564, 48971558),
darus_id=93665,
sha1="ab19a5f548522b2f30d3e5c62a8be8ef2e700d31",
size=424427476,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p48971558-p49088278.7z"),
page_ids=range(48971558, 49088279),
darus_id=93666,
sha1="6c37a947acbe0c5ea7ca784b3f2b6acd45e63d85",
size=427253841,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p49088279-p49191572.7z"),
page_ids=range(49088279, 49191573),
darus_id=93668,
sha1="eb76c03a7ef8c447e4398e43bc38a786e85038e5",
size=403244578,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p49191573-p49295569.7z"),
page_ids=range(49191573, 49295570),
darus_id=93669,
sha1="e8fdfe3bb06f26f1b9f3af2a72ddc01c82b8d43e",
size=398883064,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p49295570-p49407627.7z"),
page_ids=range(49295570, 49407628),
darus_id=93671,
sha1="ede3336eb8954b7a5b5f322c2e404e358bebb045",
size=417824295,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p49407628-p49555852.7z"),
page_ids=range(49407628, 49555853),
darus_id=93672,
sha1="2b35ae3003832b8e684347b779a59660167e3045",
size=512447118,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p49555853-p49680990.7z"),
page_ids=range(49555853, 49680991),
darus_id=93673,
sha1="1d140d77ad33962572c2b174dcba417616742982",
size=428506806,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p49680991-p49856120.7z"),
page_ids=range(49680991, 49856121),
darus_id=93674,
sha1="f6d996f8937cb57609ca57b6f23d063a922c36f2",
size=522480169,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p49856121-p50016141.7z"),
page_ids=range(49856121, 50016142),
darus_id=93676,
sha1="a70ac72145e5dcc6f6e3d87fb4d06f3b20d52efd",
size=446909306,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p50016142-p50147937.7z"),
page_ids=range(50016142, 50147938),
darus_id=93678,
sha1="13210f1889160b1a0b21f3dafc2190f569dda45a",
size=351613754,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p50147938-p50296512.7z"),
page_ids=range(50147938, 50296513),
darus_id=93680,
sha1="4bb16f3e1e47525ac77eee67bac593c87360e0e9",
size=347498457,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p50296513-p50420873.7z"),
page_ids=range(50296513, 50420874),
darus_id=93681,
sha1="5d9cc8434e5583fd0ce66e3e4755f7abc0a3eeb2",
size=283514567,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p50420874-p50526467.7z"),
page_ids=range(50420874, 50526468),
darus_id=93682,
sha1="4b1005e8f5c06d5176f2f46d74e93de0ffc6a160",
size=242285285,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p50526468-p50636067.7z"),
page_ids=range(50526468, 50636068),
darus_id=93684,
sha1="c820c0f684e10cfe0c46ef9b393f34e7b8f91761",
size=256015169,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p50636068-p50690485.7z"),
page_ids=range(50636068, 50690486),
darus_id=93685,
sha1="08c4722a3e758cf375a216232a12de12555967c9",
size=136582268,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p50690486-p50823114.7z"),
page_ids=range(50690486, 50823115),
darus_id=93687,
sha1="a46fbfaaf9160a8c38bcb4880d981842165c8620",
size=317276123,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p50823115-p50942357.7z"),
page_ids=range(50823115, 50942358),
darus_id=93688,
sha1="f74fb7c7ef067b147e9d0ab573e570dcd66258be",
size=323870173,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p50942358-p51099019.7z"),
page_ids=range(50942358, 51099020),
darus_id=93689,
sha1="75d28833c679b5c6fb5c75de9a8d84447ddd7242",
size=479073462,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p51099020-p51222302.7z"),
page_ids=range(51099020, 51222303),
darus_id=93691,
sha1="ec124b37b17fe52537e28b574c5681978f15d9dd",
size=420264993,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p51222303-p51337270.7z"),
page_ids=range(51222303, 51337271),
darus_id=93692,
sha1="5d65af93a3d607f595738217a6a7bb1b51b32cc0",
size=374153902,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p51337271-p51401123.7z"),
page_ids=range(51337271, 51401124),
darus_id=93693,
sha1="cffd1eebc9c7e118690ca9847d4b908b4a25f6f4",
size=188347726,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p51401124-p51534901.7z"),
page_ids=range(51401124, 51534902),
darus_id=93695,
sha1="62c3a3173ba89268a8b6fa4bf2021d46c51cd4a8",
size=461167742,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p51534902-p51657569.7z"),
page_ids=range(51534902, 51657570),
darus_id=93696,
sha1="4106fd53060e1dab9e7a4dcb7dd338fba4cd901b",
size=402969936,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p51657570-p51724351.7z"),
page_ids=range(51657570, 51724352),
darus_id=93697,
sha1="d17d6cf04eeb4c8d4493ad399959a08a3cf17b30",
size=224254065,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p51724352-p51801432.7z"),
page_ids=range(51724352, 51801433),
darus_id=93698,
sha1="0017380ccaf99ded6dd2a1434036da1870b50572",
size=262661002,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p51801433-p51920901.7z"),
page_ids=range(51801433, 51920902),
darus_id=93701,
sha1="cffbcb579dada8476f11ef9ad5e0bcced6b5e086",
size=413383969,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p51920902-p52047769.7z"),
page_ids=range(51920902, 52047770),
darus_id=93702,
sha1="f6492c8baee558c84fcdd06aa407e26e5d8398a6",
size=433838804,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p52047770-p52189318.7z"),
page_ids=range(52047770, 52189319),
darus_id=93703,
sha1="817c4bbf74ea593079f132b9863a3a3aa92e430e",
size=460824290,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p52189319-p52335368.7z"),
page_ids=range(52189319, 52335369),
darus_id=93705,
sha1="69bbe7f2f844335dfdf597ab4283e259d901ba20",
size=471542481,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p52335369-p52437132.7z"),
page_ids=range(52335369, 52437133),
darus_id=93707,
sha1="0ae26bbecde7936cc962887e237670820b5d7205",
size=314100825,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p52437133-p52583691.7z"),
page_ids=range(52437133, 52583692),
darus_id=93708,
sha1="2ada7cab33671d3f3d6fa73dc08de7af2a3bc604",
size=497376615,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p52583692-p52707566.7z"),
page_ids=range(52583692, 52707567),
darus_id=93709,
sha1="9f59c5acf65ee0e4d816034cc94ade8e150d598b",
size=426898676,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p52707567-p52853042.7z"),
page_ids=range(52707567, 52853043),
darus_id=93711,
sha1="f8993f511c27fb4500932143381318929325755b",
size=486719933,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p52853043-p53025666.7z"),
page_ids=range(52853043, 53025667),
darus_id=93712,
sha1="737ee4387b20d239fbe65199a429ed8ffa46a27b",
size=558304678,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p53025667-p53160630.7z"),
page_ids=range(53025667, 53160631),
darus_id=93714,
sha1="c11b7b835503075a55431e79f61f6887909cf70b",
size=433173549,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p53160631-p53353101.7z"),
page_ids=range(53160631, 53353102),
darus_id=93715,
sha1="a3c05cf363230ac08f038c706d67fde5c74367d3",
size=585597574,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p53353102-p53469010.7z"),
page_ids=range(53353102, 53469011),
darus_id=93716,
sha1="dfa04f928a65618abd54c1ee6dc28a32c01d2823",
size=392415308,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p53469011-p53614717.7z"),
page_ids=range(53469011, 53614718),
darus_id=93718,
sha1="12544fac18c80fbac4a01a4a1cfed83b3bec772e",
size=457826899,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p53614718-p53792073.7z"),
page_ids=range(53614718, 53792074),
darus_id=93719,
sha1="8caa7efa73f2dc7f1481a47e78e30ff9b7a0d243",
size=522453192,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p53792074-p53892668.7z"),
page_ids=range(53792074, 53892669),
darus_id=93720,
sha1="5931a51cb0b72975831083c731efc07be660b658",
size=353770334,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p53892669-p53974823.7z"),
page_ids=range(53892669, 53974824),
darus_id=93723,
sha1="9c6e8ae2f1955f819da8ba9ecb234b2624c39c5c",
size=283515459,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p53974824-p54042840.7z"),
page_ids=range(53974824, 54042841),
darus_id=93724,
sha1="accac2c4a34f47176e5735ff58928c41e69dd01f",
size=227191803,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p54042841-p54081671.7z"),
page_ids=range(54042841, 54081672),
darus_id=93725,
sha1="07d38de07c996c5fd63bd45c6e3a48efaededcdd",
size=140006740,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p54081672-p54235229.7z"),
page_ids=range(54081672, 54235230),
darus_id=93726,
sha1="0e298eba9d49ac84c64fe938e92c07b55ce7bc10",
size=517076165,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p54235230-p54423927.7z"),
page_ids=range(54235230, 54423928),
darus_id=93727,
sha1="4857ba6e9fdf698b031521a165adc896535ca04f",
size=601487050,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p54423928-p54580506.7z"),
page_ids=range(54423928, 54580507),
darus_id=93729,
sha1="b12cf2ae15d8fa1fad9a0b076c336bf4a27c9c09",
size=520609296,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p54580507-p54746022.7z"),
page_ids=range(54580507, 54746023),
darus_id=93730,
sha1="011d61499fa0e909ada3092ebcf454c7e5c0d78c",
size=551233353,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p54746023-p54911931.7z"),
page_ids=range(54746023, 54911932),
darus_id=93731,
sha1="145018dd9b0c57b314140a891764e77fb30a97fe",
size=513502668,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p54911932-p55030702.7z"),
page_ids=range(54911932, 55030703),
darus_id=93732,
sha1="78b089d1fd742ab48ef2853d7b0b911010f84313",
size=294991147,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p55030703-p55134250.7z"),
page_ids=range(55030703, 55134251),
darus_id=93734,
sha1="fa271cf0bb6bcaaf0078b4a6052ae12afbabc282",
size=326779006,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p55134251-p55255749.7z"),
page_ids=range(55134251, 55255750),
darus_id=93735,
sha1="5921e598a43a00c55f1176912ed0afae95a5cd05",
size=334635972,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p55255750-p55399738.7z"),
page_ids=range(55255750, 55399739),
darus_id=93736,
sha1="2bc8a542418b6e334cdfea35d8e3b94b2a21e9a8",
| |
range(14):
ROM.write(BubbleLead[y])
Pointer += 1
elif weapons[x] == Quickbyte:
Pointer = Palette[x]
Seek = ROM.seek(Pointer,0)
for y in range(14):
ROM.write(QuickBoomerang[y])
Pointer += 1
elif weapons[x] == Heatbyte:
Pointer = Palette[x]
Seek = ROM.seek(Pointer,0)
for y in range(14):
ROM.write(AtomicFire[y])
Pointer += 1
elif weapons[x] == Woodbyte:
Pointer = Palette[x]
Seek = ROM.seek(Pointer,0)
for y in range(14):
ROM.write(LeafShield[y])
Pointer += 1
elif weapons[x] == Metalbyte:
Pointer = Palette[x]
Seek = ROM.seek(Pointer,0)
for y in range(14):
ROM.write(MetalBlade[y])
Pointer += 1
elif weapons[x] == Flashbyte:
Pointer = Palette[x]
Seek = ROM.seek(Pointer,0)
for y in range(14):
ROM.write(TimeStopper[y])
Pointer += 1
elif weapons[x] == Crashbyte:
Pointer = Palette[x]
Seek = ROM.seek(Pointer,0)
for y in range(14):
ROM.write(CrashBomber[y])
Pointer += 1
elif weapons[x] == Sparkbyte:
Pointer = Palette[x]
Seek = ROM.seek(Pointer,0)
for y in range(14):
ROM.write(SparkShock[y])
Pointer += 1
elif weapons[x] == Snakebyte:
Pointer = Palette[x]
Seek = ROM.seek(Pointer,0)
for y in range(14):
ROM.write(SearchSnake[y])
Pointer += 1
elif weapons[x] == Needlebyte:
Pointer = Palette[x]
Seek = ROM.seek(Pointer,0)
for y in range(14):
ROM.write(NeedleCannon[y])
Pointer += 1
elif weapons[x] == Hardbyte:
Pointer = Palette[x]
Seek = ROM.seek(Pointer,0)
for y in range(14):
ROM.write(HardKnuckle[y])
Pointer += 1
elif weapons[x] == Topbyte:
Pointer = Palette[x]
Seek = ROM.seek(Pointer,0)
for y in range(14):
ROM.write(TopSpin[y])
Pointer += 1
elif weapons[x] == Geminibyte:
Pointer = Palette[x]
Seek = ROM.seek(Pointer,0)
for y in range(14):
ROM.write(GeminiLaser[y])
Pointer += 1
elif weapons[x] == Magnetbyte:
Pointer = Palette[x]
Seek = ROM.seek(Pointer,0)
for y in range(14):
ROM.write(MagnetMissile[y])
Pointer += 1
elif weapons[x] == Shadowbyte:
Pointer = Palette[x]
Seek = ROM.seek(Pointer,0)
for y in range(14):
ROM.write(ShadowBlade[y])
Pointer += 1
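# The elif chain above repeats the same steps for every weapon (look up the
# palette offset, seek, write 14 palette bytes); only the source byte list
# changes. A hedged, table-driven sketch covering just the branches visible
# here (branches truncated above would extend the mapping the same way):
#
#     palette_bytes = {
#         Quickbyte: QuickBoomerang, Heatbyte: AtomicFire, Woodbyte: LeafShield,
#         Metalbyte: MetalBlade, Flashbyte: TimeStopper, Crashbyte: CrashBomber,
#         Sparkbyte: SparkShock, Snakebyte: SearchSnake, Needlebyte: NeedleCannon,
#         Hardbyte: HardKnuckle, Topbyte: TopSpin, Geminibyte: GeminiLaser,
#         Magnetbyte: MagnetMissile, Shadowbyte: ShadowBlade,
#     }
#     source = palette_bytes.get(weapons[x])
#     if source is not None:
#         ROM.seek(Palette[x], 0)
#         for y in range(14):
#             ROM.write(source[y])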
#Writing weapon data if no random weapons
Weaponmenu = [0x65A53,0x65A55,0x65A57,0x65A59,0x65A5B,0x65A61,0x65A63,0x65A65]
Weapongraphics = [0x709CC,0x709D0,0x709D4,0x709D8,0x709DC,0x709E0,0x709E4,0x709E8]
Weaponname = [0x65A79,0x65A7F,0x65A85,0x65A8B,0x65A91,0x65AA3,0x65AA9,0x65AAF]
Cut = [b'\x05',b'\x3C',b'\x22',b'\x1B',b'\x0C']
Guts = [b'\x03',b'\x39',b'\xE2',b'\x1C',b'\x0A']
Ice = [b'\x04',b'\x3B',b'\x02',b'\x12',b'\x1C']
Bomb = [b'\x01',b'\x37',b'\xA2',b'\x11',b'\x0B']
Fire = [b'\x06',b'\x3D',b'\x42',b'\x0F',b'\x1C']
Elec = [b'\x02',b'\x38',b'\xC2',b'\x1D',b'\x0B']
Bubble = [b'\x0B',b'\x49',b'\xA2',b'\x0B',b'\x15']
Air = [b'\x09',b'\x47',b'\x62',b'\x0A',b'\x1C']
Quick = [b'\x0C',b'\x4A',b'\xC2',b'\x1A',b'\x0B']
Heat = [b'\x08',b'\x46',b'\x42',b'\x0A',b'\x0F']
Wood = [b'\x0A',b'\x48',b'\x82',b'\x15',b'\x1C']
Metal = [b'\x0F',b'\x4D',b'\x02',b'\x16',b'\x0B']
Flash = [b'\x0E',b'\x4B',b'\xE2',b'\x1D',b'\x1C']
Crash = [b'\x10',b'\x4E',b'\x22',b'\x0C',b'\x0B']
Spark = [b'\x1A',b'\x27',b'\xE2',b'\x1C',b'\x19']
Snake = [b'\x19',b'\x26',b'\xC2',b'\x1C',b'\x17']
Needle = [b'\x14',b'\x21',b'\x22',b'\x17',b'\x0C']
Hard = [b'\x17',b'\x24',b'\x82',b'\x11',b'\x14']
Top = [b'\x18',b'\x25',b'\xA2',b'\x1D',b'\x1C']
Gemini = [b'\x16',b'\x23',b'\x62',b'\x10',b'\x15']
Magnet = [b'\x15',b'\x22',b'\x42',b'\x16',b'\x16']
Shadow = [b'\x1B',b'\x29',b'\x02',b'\x1C',b'\x0B']
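# Each five-byte list above is consumed in order by the boss blocks below:
# index 0 is the weapon menu value, indices 1-2 are the two graphics offset
# bytes, and indices 3-4 are the two name tile bytes (matching the
# "#Weapon value written", "#Graphics Offset 1"/"#Graphics 2" and
# "#Name 1"/"#Name 2" comments in each block).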
if MM3 == True: #!
if randomweapons == False:
if Flashman == True: #Finds which bosses have been generated and assigns weapon data
Pointer = Weaponmenu.pop(0)
Value = Flash.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Weapon value written
Pointer = Weapongraphics.pop(0)
Value = Flash.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Graphics Offset 1
Pointer +=1
Value = Flash.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Graphics 2
Pointer = Weaponname.pop(0)
Value = Flash.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value)#Name 1
Pointer +=2
Value = Flash.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Name 2
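# Every boss block in this section repeats the same five writes with a
# different byte list. A hedged sketch of a shared helper, assuming the
# Weaponmenu, Weapongraphics and Weaponname offset lists defined above:
#
#     def write_weapon_data(rom, data):
#         rom.seek(Weaponmenu.pop(0), 0)
#         rom.write(data.pop(0))        # weapon menu value
#         pointer = Weapongraphics.pop(0)
#         rom.seek(pointer, 0)
#         rom.write(data.pop(0))        # graphics offset byte 1
#         rom.seek(pointer + 1, 0)
#         rom.write(data.pop(0))        # graphics offset byte 2
#         pointer = Weaponname.pop(0)
#         rom.seek(pointer, 0)
#         rom.write(data.pop(0))        # name tile 1
#         rom.seek(pointer + 2, 0)
#         rom.write(data.pop(0))        # name tile 2
#
#     # e.g. if Flashman == True: write_weapon_data(ROM, Flash)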
if Cutman == True:
Pointer = Weaponmenu.pop(0)
Value = Cut.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Weapon value written
Pointer = Weapongraphics.pop(0)
Value = Cut.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Graphics Offset 1
Pointer +=1
Value = Cut.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Graphics 2
Pointer = Weaponname.pop(0)
Value = Cut.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value)#Name 1
Pointer +=2
Value = Cut.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Name 2
if Gutsman == True:
Pointer = Weaponmenu.pop(0)
Value = Guts.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Weapon value written
Pointer = Weapongraphics.pop(0)
Value = Guts.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Graphics Offset 1
Pointer +=1
Value = Guts.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Graphics 2
Pointer = Weaponname.pop(0)
Value = Guts.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value)#Name 1
Pointer +=2
Value = Guts.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Name 2
if Iceman == True:
Pointer = Weaponmenu.pop(0)
Value = Ice.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Weapon value written
Pointer = Weapongraphics.pop(0)
Value = Ice.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Graphics Offset 1
Pointer +=1
Value = Ice.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Graphics 2
Pointer = Weaponname.pop(0)
Value = Ice.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value)#Name 1
Pointer +=2
Value = Ice.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Name 2
if Bombman == True:
Pointer = Weaponmenu.pop(0)
Value = Bomb.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Weapon value written
Pointer = Weapongraphics.pop(0)
Value = Bomb.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Graphics Offset 1
Pointer +=1
Value = Bomb.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Graphics 2
Pointer = Weaponname.pop(0)
Value = Bomb.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value)#Name 1
Pointer +=2
Value = Bomb.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Name 2
if Fireman == True:
Pointer = Weaponmenu.pop(0)
Value = Fire.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Weapon value written
Pointer = Weapongraphics.pop(0)
Value = Fire.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Graphics Offset 1
Pointer +=1
Value = Fire.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Graphics 2
Pointer = Weaponname.pop(0)
Value = Fire.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value)#Name 1
Pointer +=2
Value = Fire.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Name 2
if Elecman == True:
Pointer = Weaponmenu.pop(0)
Value = Elec.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Weapon value written
Pointer = Weapongraphics.pop(0)
Value = Elec.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Graphics Offset 1
Pointer +=1
Value = Elec.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Graphics 2
Pointer = Weaponname.pop(0)
Value = Elec.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value)#Name 1
Pointer +=2
Value = Elec.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Name 2
if Bubbleman == True:
Pointer = Weaponmenu.pop(0)
Value = Bubble.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Weapon value written
Pointer = Weapongraphics.pop(0)
Value = Bubble.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Graphics Offset 1
Pointer +=1
Value = Bubble.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Graphics 2
Pointer = Weaponname.pop(0)
Value = Bubble.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value)#Name 1
Pointer +=2
Value = Bubble.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Name 2
if Airman == True:
Pointer = Weaponmenu.pop(0)
Value = Air.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Weapon value written
Pointer = Weapongraphics.pop(0)
Value = Air.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Graphics Offset 1
Pointer +=1
Value = Air.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Graphics 2
Pointer = Weaponname.pop(0)
Value = Air.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value)#Name 1
Pointer +=2
Value = Air.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Name 2
if Quickman == True:
Pointer = Weaponmenu.pop(0)
Value = Quick.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Weapon value written
Pointer = Weapongraphics.pop(0)
Value = Quick.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Graphics Offset 1
Pointer +=1
Value = Quick.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Graphics 2
Pointer = Weaponname.pop(0)
Value = Quick.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value)#Name 1
Pointer +=2
Value = Quick.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Name 2
if Heatman == True:
Pointer = Weaponmenu.pop(0)
Value = Heat.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Weapon value written
Pointer = Weapongraphics.pop(0)
Value = Heat.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Graphics Offset 1
Pointer +=1
Value = Heat.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Graphics 2
Pointer = Weaponname.pop(0)
Value = Heat.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value)#Name 1
Pointer +=2
Value = Heat.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Name 2
if Woodman == True:
Pointer = Weaponmenu.pop(0)
Value = Wood.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Weapon value written
Pointer = Weapongraphics.pop(0)
Value = Wood.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Graphics Offset 1
Pointer +=1
Value = Wood.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Graphics 2
Pointer = Weaponname.pop(0)
Value = Wood.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value)#Name 1
Pointer +=2
Value = Wood.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Name 2
if Metalman == True:
Pointer = Weaponmenu.pop(0)
Value = Metal.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Weapon value written
Pointer = Weapongraphics.pop(0)
Value = Metal.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Graphics Offset 1
Pointer +=1
Value = Metal.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Graphics 2
Pointer = Weaponname.pop(0)
Value = Metal.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value)#Name 1
Pointer +=2
Value = Metal.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Name 2
if Crashman == True:
Pointer = Weaponmenu.pop(0)
Value = Crash.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Weapon value written
Pointer = Weapongraphics.pop(0)
Value = Crash.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Graphics Offset 1
Pointer +=1
Value = Crash.pop(0)
Seek = ROM.seek(Pointer,0)
ROM.write(Value) #Graphics 2
Pointer = Weaponname.pop(0)
Value = Crash.pop(0)
Seek | |
# vim: sw=4:ts=4:et
import logging
import os, os.path
import pickle
import re
import shutil
import signal
import tarfile
import tempfile
import threading
import time
import unittest
import uuid
from multiprocessing import Queue, cpu_count, Event
from queue import Empty
import saq, saq.test
from saq.analysis import RootAnalysis, _get_io_read_count, _get_io_write_count, Observable
from saq.constants import *
from saq.database import get_db_connection, use_db, acquire_lock, clear_expired_locks, initialize_node
from saq.engine import Engine, DelayedAnalysisRequest, add_workload
from saq.network_client import submit_alerts
from saq.observables import create_observable
from saq.test import *
from saq.util import *
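# The cases below exercise engine lifecycle behaviour: controlled and immediate
# shutdown, SIGTERM/SIGINT handling, single- versus multi-process analysis,
# node registration, analysis-mode mapping, and module loading.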
class TestCase(ACEEngineTestCase):
def test_controlled_stop(self):
engine = Engine()
try:
engine.start()
engine.controlled_stop()
engine.wait()
except KeyboardInterrupt:
engine.stop()
engine.wait()
def test_immediate_stop(self):
engine = Engine()
try:
engine.start()
engine.stop()
engine.wait()
except KeyboardInterrupt:
engine.stop()
engine.wait()
def test_signal_TERM(self):
engine = Engine()
try:
engine.start()
def _send_signal():
wait_for_log_count('waiting for engine process', 1)
os.kill(engine.engine_process.pid, signal.SIGTERM)
t = threading.Thread(target=_send_signal)
t.start()
engine.wait()
except KeyboardInterrupt:
engine.stop()
engine.wait()
def test_signal_INT(self):
engine = Engine()
try:
engine.start()
def _send_signal():
wait_for_log_count('waiting for engine process', 1)
os.kill(engine.engine_process.pid, signal.SIGINT)
t = threading.Thread(target=_send_signal)
t.start()
engine.wait()
except KeyboardInterrupt:
engine.stop()
engine.wait()
def test_single_process(self):
# test starting and stopping in single-process mode
engine = Engine(single_threaded_mode=True)
try:
engine.start()
except KeyboardInterrupt:
pass
def test_engine_default_pools(self):
# test starting with no analysis pools defined
engine = Engine()
engine.start()
engine.stop()
engine.wait()
# we should see this log message
regex = re.compile(r'no analysis pools defined -- defaulting to (\d+) workers assigned to any pool')
results = search_log_regex(regex)
self.assertEquals(len(results), 1)
m = regex.search(results[0].getMessage())
self.assertIsNotNone(m)
self.assertEquals(int(m.group(1)), cpu_count())
@use_db
def test_acquire_node_id(self, db, c):
engine = Engine()
engine.start()
engine.stop()
engine.wait()
# when an Engine starts up it should acquire a node_id for saq.SAQ_NODE
self.assertIsNotNone(saq.SAQ_NODE_ID)
c.execute("""SELECT name, location, company_id, is_primary, any_mode, is_local
FROM nodes WHERE id = %s""", (saq.SAQ_NODE_ID,))
row = c.fetchone()
self.assertIsNotNone(row)
_name, _location, _company_id, _is_primary, _any_mode, _is_local = row
self.assertEquals(_name, saq.SAQ_NODE)
self.assertEquals(_location, saq.API_PREFIX)
self.assertEquals(_company_id, saq.COMPANY_ID)
#self.assertIsInstance(_any_mode, int)
#self.assertEquals(_any_mode, 0)
self.assertIsInstance(_is_local, int)
self.assertEquals(_is_local, 0)
@use_db
def test_acquire_local_node_id(self, db, c):
engine = Engine()
engine.set_local()
engine.start()
engine.stop()
engine.wait()
# when a local engine starts up it should acquire a local node with a uuid as the name
self.assertIsNotNone(saq.SAQ_NODE_ID)
c.execute("""SELECT name, location, company_id, is_primary, any_mode, is_local
FROM nodes WHERE id = %s""", (saq.SAQ_NODE_ID,))
row = c.fetchone()
from saq.util import validate_uuid
self.assertIsNotNone(row)
_name, _location, _company_id, _is_primary, _any_mode, _is_local = row
self.assertTrue(validate_uuid(_name))
self.assertEquals(_company_id, saq.COMPANY_ID)
#self.assertIsInstance(_any_mode, int)
#self.assertEquals(_any_mode, 0)
self.assertIsInstance(_is_local, int)
self.assertEquals(_is_local, 1)
def test_analysis_modes(self):
engine = TestEngine()
engine.initialize()
engine.initialize_modules()
# analysis mode test_empty should have 0 modules
self.assertEquals(len(engine.analysis_mode_mapping['test_empty']), 0)
engine = TestEngine()
engine.enable_module('analysis_module_basic_test', 'test_empty')
engine.enable_module('analysis_module_test_delayed_analysis', 'test_empty')
engine.enable_module('analysis_module_test_engine_locking', 'test_empty')
engine.enable_module('analysis_module_test_final_analysis', 'test_empty')
engine.enable_module('analysis_module_test_post_analysis', 'test_empty')
engine.initialize()
engine.initialize_modules()
# analysis mode test_single should have 1 module
self.assertEquals(len(engine.analysis_mode_mapping['test_single']), 1)
self.assertEquals(engine.analysis_mode_mapping['test_single'][0].config_section, 'analysis_module_basic_test')
# analysis mode test_groups should have 5 modules
self.assertEquals(len(engine.analysis_mode_mapping['test_groups']), 5)
# analysis mode test_disabled should have 4 modules (minus basic_test)
self.assertEquals(len(engine.analysis_mode_mapping['test_disabled']), 4)
self.assertTrue('analysis_module_basic_test' not in [m.config_section for m in engine.analysis_mode_mapping['test_disabled']])
def test_single_process_analysis(self):
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_1')
root.analysis_mode = 'test_single'
root.save()
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_basic_test')
#engine.controlled_stop() # redundant
engine.single_threaded_start(mode='test_single')
root.load()
observable = root.get_observable(observable.id)
self.assertIsNotNone(observable)
from saq.modules.test import BasicTestAnalysis
analysis = observable.get_analysis(BasicTestAnalysis)
self.assertIsNotNone(analysis)
def test_multi_process_analysis(self):
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_1')
root.analysis_mode = 'test_single'
root.save()
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
root.load()
observable = root.get_observable(observable.id)
self.assertIsNotNone(observable)
from saq.modules.test import BasicTestAnalysis
analysis = observable.get_analysis(BasicTestAnalysis)
self.assertIsNotNone(analysis)
def test_missing_analysis_mode(self):
saq.CONFIG['engine']['default_analysis_mode'] = 'test_single'
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.analysis_mode = None # <-- no analysis mode here
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_1')
root.save()
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
# the analysis mode should default to test_single
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
#self.assertIsNone(root.analysis_mode)
observable = root.get_observable(observable.id)
self.assertIsNotNone(observable)
analysis = observable.get_analysis('BasicTestAnalysis')
self.assertIsNotNone(analysis)
def test_invalid_analysis_mode(self):
# an invalid analysis mode happens when you submit an analysis to an engine
# that supports any analysis mode but doesn't have any configuration settings
# for the one that was submitted
# in that case we use the default_analysis_mode
# we're setting the analysis mode to an invalid value
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='foobar')
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_1')
root.save()
root.schedule()
engine = TestEngine(local_analysis_modes=[])
engine.default_analysis_mode = 'test_single'
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
# the analysis mode should fall back to the engine default (test_single here) but we should also get a warning
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
observable = root.get_observable(observable.id)
self.assertIsNotNone(observable)
from saq.modules.test import BasicTestAnalysis
analysis = observable.get_analysis(BasicTestAnalysis)
self.assertIsNotNone(analysis)
self.assertTrue(log_count('invalid analysis mode') > 0)
def test_multi_process_multi_analysis(self):
uuids = []
for _ in range(3):
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_1')
root.analysis_mode = 'test_single'
root.save()
root.schedule()
uuids.append((root.uuid, observable.id))
engine = TestEngine()
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
for root_uuid, observable_uuid in uuids:
root = RootAnalysis(uuid=root_uuid)
root.storage_dir = storage_dir_from_uuid(root_uuid)
root.load()
observable = root.get_observable(observable_uuid)
self.assertIsNotNone(observable)
from saq.modules.test import BasicTestAnalysis
analysis = observable.get_analysis(BasicTestAnalysis)
self.assertIsNotNone(analysis)
def test_no_enabled_modules(self):
# by default the analysis modules specified for the unit tests are disabled (globally)
# so just starting up an engine should load no modules at all
# even though there are modules enabled for the "test_groups" analysis mode
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.controlled_stop()
engine.start()
engine.wait()
self.assertEquals(log_count('loading module '), 0)
def test_globally_enabled_modules(self):
# if we globally enable ALL modules then we should see the correct modules get loaded
for section in saq.CONFIG.keys():
if not section.startswith('analysis_module_'):
continue
saq.CONFIG[section]['enabled'] = 'yes'
# the config file specifies test_empty,test_single,test_groups,test_disabled,test_cleanup as the
# locally supported analysis modes
# so we should see only the modules assigned to these modes get loaded here
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.controlled_stop()
engine.start()
engine.wait()
# TODO kind of annoying I have to edit this every time I add a new module for testing
# there should be 19 analysis modules loaded
self.assertEquals(log_count('loading module '), 19)
def test_locally_enabled_modules(self):
# if we enable modules locally then ONLY those should get loaded
# first we change the config to globally enable all modules
for section in saq.CONFIG.keys():
if not section.startswith('analysis_module_'):
continue
saq.CONFIG[section]['enabled'] = 'yes'
engine = TestEngine(analysis_pools={'test_groups': 1})
# this is the only module that should get loaded
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
# even though 5 are specified and globally enabled, only 1 is loaded
self.assertEquals(log_count('loading module '), 1)
self.assertEquals(log_count('loading module analysis_module_basic_test'), 1)
def test_no_analysis(self):
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
# this test should return False instead of an Analysis
observable = root.add_observable(F_TEST, 'test_2')
root.analysis_mode = 'test_single'
root.save()
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(uuid=root.uuid, storage_dir=root.storage_dir)
root.load()
observable = root.get_observable(observable.id)
from saq.modules.test import BasicTestAnalysis
# so this should come back as False
self.assertTrue(isinstance(observable.get_analysis(BasicTestAnalysis), bool))
self.assertFalse(observable.get_analysis(BasicTestAnalysis))
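# The next test submits four observables with the same value but different event
# times: the grouped-time-range module analyzes the two outliers individually,
# collapses the two close-together observables onto a single grouping target,
# and a second engine pass runs the grouping-target module only on that target.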
def test_time_range_grouped_analysis(self):
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable_1 = root.add_observable(F_TEST, 'test_1', parse_event_time('2019-04-16 12:00:00'))
observable_2 = root.add_observable(F_TEST, 'test_1', parse_event_time('2019-04-16 12:10:00'))
observable_3 = root.add_observable(F_TEST, 'test_1', parse_event_time('2019-04-16 14:00:00'))
observable_4 = root.add_observable(F_TEST, 'test_1', parse_event_time('2019-04-16 10:00:00'))
root.analysis_mode = 'test_groups'
root.save()
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_grouped_time_range', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(uuid=root.uuid, storage_dir=root.storage_dir)
root.load()
observable_1 = root.get_observable(observable_1.id)
observable_2 = root.get_observable(observable_2.id)
observable_3 = root.get_observable(observable_3.id)
observable_4 = root.get_observable(observable_4.id)
from saq.modules.test import GroupedByTimeRangeAnalysis
# observations 3 and 4 should have analysis
self.assertTrue(bool(observable_3.get_analysis(GroupedByTimeRangeAnalysis)))
self.assertTrue(bool(observable_4.get_analysis(GroupedByTimeRangeAnalysis)))
# either 1 or 2 should have it but not both (logical xor)
self.assertTrue(bool(observable_1.get_analysis(GroupedByTimeRangeAnalysis)) ^ bool(observable_2.get_analysis(GroupedByTimeRangeAnalysis)))
# and one of these should be a grouping target
self.assertTrue(observable_1.grouping_target or observable_2.grouping_target)
# remember which one was the grouping target
grouping_target = observable_1 if observable_1.grouping_target else observable_2
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_grouping_target', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(uuid=root.uuid, storage_dir=root.storage_dir)
root.load()
observable_1 = root.get_observable(observable_1.id)
observable_2 = root.get_observable(observable_2.id)
grouping_target = root.get_observable(grouping_target.id)
from saq.modules.test import GroupingTargetAnalysis
# either 1 or 2 should have it but not both (logical xor)
self.assertTrue(bool(observable_1.get_analysis(GroupingTargetAnalysis)) ^ bool(observable_2.get_analysis(GroupingTargetAnalysis)))
# and the one that was previously marked as the grouping target is the one that should have the analysis
self.assertTrue(bool(grouping_target.get_analysis(GroupingTargetAnalysis)))
def test_no_analysis_no_return(self):
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_single')
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_3')
root.save()
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(uuid=root.uuid, storage_dir=root.storage_dir)
root.load()
observable = root.get_observable(observable.id)
from saq.modules.test import BasicTestAnalysis
# so what happens here is even though you return nothing from execute_analysis
# execute_final_analysis defaults to returning False
self.assertFalse(observable.get_analysis(BasicTestAnalysis))
# you should also get a warning log
wait_for_log_count('is not returning a boolean value', 1, 5)
def test_delayed_analysis_single(self):
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, '0:01|0:05')
root.save()
root.schedule()
engine
#!/usr/bin/env python
"""
globiconfig/Main.py - globifest Config Main Application
Copyright 2018-2019, <NAME>, Garmin Ltd, or its subsidiaries.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
import tkinter
import tkinter.filedialog
import tkinter.messagebox
import tkinter.ttk
from Globiconfig import CheckBoxCombo, CheckBoxText, FilterText
from GlobifestLib import Builder, DefTree, Log, ManifestParser, Util
ACCEL = Util.create_enum(
"CONTROL"
)
ACCELERATOR = [
Util.Container(text="Ctrl", bind="Control")
]
G_ID = Util.create_enum(
"FRM_MAIN"
)
PADDING = 5
STICKY_FILL = tkinter.N + tkinter.W + tkinter.E + tkinter.S
PANE_0_MINWIDTH = 100
PANE_1_MINWIDTH = 100
WINDOW_MINWIDTH = 300
WINDOW_MINHEIGHT = 200
DIVIDER_WIDTH = 4
assert (PANE_0_MINWIDTH + PANE_1_MINWIDTH) <= WINDOW_MINWIDTH
CFG_TAG = Util.create_enum(
"MENU",
"PARAM"
)
def gui_child_sorter(children):
"""Sorter (PEP 265) for children to be shown in the config tree"""
return sorted(children, key=DefTree.DefForest.ChildNameGetter())
def gui_param_sorter(params):
"""Sorter (PEP 265) for parameters to be shown in the config tree"""
return sorted(params, key=DefTree.DefForest.ParamTextGetter())
class CfgTreeObserver(object):
"""Observer for DefForest to add items to the config tree"""
def __init__(self, cfg_tree, param_tbl):
# Links to objects owned by application
self.cfg_tree = cfg_tree
self.param_tbl = param_tbl
# Temporary workspace variables
self.param_tree = list()
self.root_stack = list()
self.counter_stack = list()
self.level = 0
def on_param(self, param):
"""Save parameters to be added after all children"""
self.param_tree[-1].append(param)
def on_scope_begin(self, title, description):
"""Add a child item"""
# Add a new empty param tree level
self.param_tree.append(list())
self.level += 1
if self.level == 1:
# Only process parameters in the top scope
return
# Increment the counter; each child will be identified within its
# parent level numerically: 1, 2, 3, etc...
if self.counter_stack:
self.counter_stack[-1] += 1
else:
self.counter_stack.append(0)
# Join the counters together to form a unique IID representative of
# its position in the tree: 1, 1_1, 2_1_3, etc...
new_iid = "_".join(str(c) for c in self.counter_stack)
if self.root_stack:
parent = self.root_stack[-1]
else:
parent = ""
# Add a new element to this tree level
self.counter_stack.append(0)
self.cfg_tree.insert(
parent=parent,
index="end",
iid=new_iid,
text=title,
values=(description,),
tags=(CFG_TAG.MENU,)
)
# Add the new IID to the root stack for children in this scope
self.root_stack.append(new_iid)
def on_scope_end(self):
"""Pop the current scope level"""
self.level -= 1
if self.level == 0:
# Adjust parent for top-level parameters
parent = ""
else:
parent = self.root_stack[-1]
# Add all saved params, so that they appear below the child trees
for p in self.param_tree[-1]:
pid = p.param.get_identifier()
self.param_tbl[pid] = p
self.cfg_tree.insert(
parent=parent,
index="end",
iid=pid,
text=p.param.get_text(),
values=(pid,),
tags=(CFG_TAG.PARAM,)
)
# Clear the param tree for the next iteration
self.param_tree.pop()
if self.level == 0:
# Only process parameters in top scope
return
self.root_stack.pop()
self.counter_stack.pop()
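# In summary: DefForest traversal calls on_scope_begin/on_scope_end around each
# menu scope and on_param for each parameter; parameters are buffered per level
# and only written out in on_scope_end, so within a menu they always appear
# below its child sub-menus in the config tree.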
class App(object):
"""
Main Application
"""
APP_TITLE = "Globiconfig"
def __init__(self, project_file, out_dir):
self.project_file = project_file
self.out_dir = out_dir
self.project = None
self.param_tbl = Util.Container()
self.settings_view_tbl = Util.Container()
self.settings_cache = Util.Container()
self.cur_tree_item = None
self._modified = False
self._opendir = "."
# Menus that need to be accessed later
self.file_menu = None
# Set up tkinter app root; this is not a super-class so the API is private
self.app_root = tkinter.Tk()
self.app_root.title(self.APP_TITLE)
self.app_root.minsize(width=WINDOW_MINWIDTH, height=WINDOW_MINHEIGHT)
# Forward declarations to keep pylint happy
self.cfg_tree = None
self.cfg_tree_h_scrollbar = None
self.cfg_tree_v_scrollbar = None
self.desc_frame = None
self.desc_txt = None
self.pane_divider = None
self.settings_frame = None
self.settings_layer_cmb = None
self.settings_layer_lbl = None
self.settings_variant_cmb = None
self.settings_variant_lbl = None
self.src_frame = None
self.src_txt = None
self.value_cmb = None
self.value_frame = None
self.value_stub = None
self.value_txt = None
# Set up control variables
self.cur_layer = tkinter.StringVar()
self.last_layer = ""
self.cur_variant = tkinter.StringVar()
self.last_variant = ""
self.value_change_enable = True
self.last_pid = None
# Divide the window into two panes, which stretch according to the divider's size
# pane_divider and its frames don't use normal grid layout, so these are setup
# differently than the other controls.
self._add_leaf_control(
"pane_divider",
tkinter.PanedWindow(
self.app_root,
sashwidth=DIVIDER_WIDTH,
sashrelief=tkinter.SUNKEN
)
)
self.pane_0 = tkinter.ttk.Frame(self.pane_divider, padding=PADDING)
self.pane_divider.add(self.pane_0)
self.pane_1 = tkinter.ttk.Frame(self.pane_divider, padding=PADDING)
self.pane_divider.add(self.pane_1)
self.pane_divider.paneconfigure(
self.pane_0,
padx=PADDING,
pady=PADDING,
minsize=PANE_0_MINWIDTH
)
self.pane_divider.paneconfigure(
self.pane_1,
padx=PADDING,
pady=PADDING,
minsize=PANE_1_MINWIDTH
)
# Make the cell containing the pane divider autoresize, which in turn allows the
# panes and their children resize.
self.app_root.grid_columnconfigure(0, weight=1)
self.app_root.grid_rowconfigure(0, weight=1)
# Populate controls
self.create_menu_bar()
self.create_pane_0()
self.create_pane_1()
def add_menu_item(self, top_menu, cmd):
"""Add a single menu item to top_menu"""
if cmd == "-":
top_menu.add_separator()
return
text = cmd.t
a_pos = text.find("&")
menustate = cmd.get("s", "normal")
if a_pos == -1:
top_menu.add_command(label=text, command=cmd.f, state=menustate)
else:
a_key = text[a_pos+1]
text = text[:a_pos] + text[a_pos+1:]
a_type = cmd.get("a", ACCEL.CONTROL)
a_text = "{}+{}".format(ACCELERATOR[a_type].text, a_key.upper())
binding = "<{}-{}>".format(ACCELERATOR[a_type].bind, a_key.lower())
top_menu.add_command(
label=text,
underline=a_pos,
command=cmd.f,
accelerator=a_text,
state=menustate
)
self.app_root.bind_all(binding, cmd.f)
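# For example, M(t="&Open...", f=...) produces a menu entry labelled "Open..."
# with the "O" underlined, accelerator text "Ctrl+O", and a "<Control-o>"
# binding registered on the application root.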
def add_menu_items(self, top_menu, cmd_list):
""""Add a list of menu items to top_menu"""
for i in cmd_list:
self.add_menu_item(top_menu, i)
def create_menu_bar(self):
"""Create the menu bar"""
M = Util.Container
top_menu = tkinter.Menu(self.app_root)
self.file_menu = tkinter.Menu(top_menu, tearoff=0)
self.add_menu_items(self.file_menu, [
M(t="&Open...", f=self.on_menu_file_open),
M(t="&Save", f=self.on_menu_file_save, s="disabled"),
M(t="&Close", f=self.on_menu_file_close, s="disabled"),
"-",
M(t="E&xit", f=self.on_menu_file_exit)
])
top_menu.add_cascade(label="File", underline=0, menu=self.file_menu)
self.add_menu_item(top_menu, M(t="About!", f=self.on_menu_about))
self.app_root.config(menu=top_menu)
def create_pane_0(self):
"""
Create the controls on the left side of the window
"""
# Auto-resize child objects to pane width
self.pane_0.grid_columnconfigure(0, weight=1)
self.pane_0.grid_rowconfigure(0, weight=1)
self._add_leaf_control(
"cfg_tree",
tkinter.ttk.Treeview(
self.pane_0,
selectmode='browse',
show="tree"
)
)
self._add_leaf_control(
"cfg_tree_h_scrollbar",
tkinter.ttk.Scrollbar(self.pane_0, orient=tkinter.HORIZONTAL),
row=1,
col=0
)
self._add_leaf_control(
"cfg_tree_v_scrollbar",
tkinter.ttk.Scrollbar(self.pane_0, orient=tkinter.VERTICAL),
row=0,
col=1
)
# Crosslink scroll bars and tree
self.cfg_tree_h_scrollbar.config(command=self.cfg_tree.xview)
self.cfg_tree_v_scrollbar.config(command=self.cfg_tree.yview)
self.cfg_tree.configure(
xscrollcommand=self.cfg_tree_h_scrollbar.set,
yscrollcommand=self.cfg_tree_v_scrollbar.set
)
# Bind selection handler to this object
def bind_cfg_tree_cb(_event=None):
"""Binding method to call the handler"""
iid = self.cfg_tree.focus()
sel = self.cfg_tree.item(iid)
self.on_cfg_tree_click(sel["values"][0], sel["tags"][0])
self.cfg_tree.bind("<<TreeviewSelect>>", bind_cfg_tree_cb)
def create_pane_1(self):
"""
Create the controls on the right side of the window
"""
# Make all controls expand horizontally with the window
self.pane_1.grid_columnconfigure(0, weight=1)
# Rows are configured in each function
self.create_pane_1_view(0)
self.create_pane_1_description(1)
self.create_pane_1_value(2)
self.create_pane_1_source(3)
def create_pane_1_view(self, row):
"""
Create the view controls on the right side of the window
"""
# The view controls do not expand vertically with the window
self.pane_1.grid_rowconfigure(row, weight=0)
# View area
self._add_container_control(
"settings_frame",
tkinter.ttk.LabelFrame(
self.pane_1,
text="Settings View",
padding=PADDING
),
row=row,
num_cols=2,
num_rows=2
)
# Do not resize label row
self.settings_frame.grid_rowconfigure(0, weight=0)
# Layer controls
self._add_leaf_control(
"settings_layer_lbl",
tkinter.ttk.Label(self.settings_frame, text="Layer"),
row=0,
col=0
)
self._add_leaf_control(
"settings_layer_cmb",
tkinter.ttk.Combobox(
self.settings_frame,
textvariable=self.cur_layer,
height=1,
),
row=1,
col=0
)
self.settings_layer_cmb.state(["readonly"])
# Bind write handler to this object
def cur_layer_cb(*args):
"""Binding method to call the handler"""
self.on_cur_layer_changed(self.cur_layer.get())
self.cur_layer.trace("w", cur_layer_cb)
# Variant controls
self._add_leaf_control(
"settings_variant_lbl",
tkinter.ttk.Label(self.settings_frame, text="Variant"),
row=0,
col=1
)
self._add_leaf_control(
"settings_variant_cmb",
tkinter.ttk.Combobox(self.settings_frame, textvariable=self.cur_variant, height=1),
row=1,
col=1
)
self.settings_variant_cmb.state(["readonly"])
# Bind write handler to this object
def cur_variant_cb(*args):
"""Binding method to call the handler"""
self.on_cur_variant_changed(self.cur_variant.get())
self.cur_variant.trace("w", cur_variant_cb)
def create_pane_1_description(self, row):
"""
Create the description box on the right side of the window
"""
# Make this row expand with the size of the window
self.pane_1.grid_rowconfigure(row, weight=1)
self._add_container_control(
"desc_frame",
tkinter.ttk.LabelFrame(
self.pane_1,
text="Description",
padding=PADDING
),
row=row
)
self._add_leaf_control(
"desc_txt",
tkinter.Text(
self.desc_frame,
font="TkFixedFont",
relief=tkinter.FLAT,
background=tkinter.ttk.Style().lookup("TLabelFrame", "background"),
wrap=tkinter.WORD
)
)
self.set_description("")
def create_pane_1_source(self, row):
"""
Create the source box on the right side of the window
"""
# Do not resize source
**self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [])
self.assertRaises(
qos_exc.QosRuleNotFound,
self.qos_plugin.update_policy_packet_rate_limit_rule,
self.ctxt, self.pps_rule.id, self.policy.id,
self.rule_data)
def test_delete_policy_packet_rate_limit_rule(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [self.pps_rule])
self.qos_plugin.delete_policy_packet_rate_limit_rule(
self.ctxt, self.pps_rule.id, self.policy.id)
self._validate_driver_params('update_policy', self.ctxt)
def test_delete_policy_pps_rule_bad_policy(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [])
self.assertRaises(
qos_exc.QosRuleNotFound,
self.qos_plugin.delete_policy_packet_rate_limit_rule,
self.ctxt, self.pps_rule.id, _policy.id)
def test_get_policy_packet_rate_limit_rule(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosPacketRateLimitRule.'
'get_object') as get_object_mock:
self.qos_plugin.get_policy_packet_rate_limit_rule(
self.ctxt, self.pps_rule.id, self.policy.id)
get_object_mock.assert_called_once_with(self.ctxt,
id=self.pps_rule.id)
def test_get_policy_packet_rate_limit_rules_for_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosPacketRateLimitRule.'
'get_objects') as get_objects_mock:
self.qos_plugin.get_policy_packet_rate_limit_rules(
self.ctxt, self.policy.id)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY, qos_policy_id=self.policy.id)
def test_get_policy_packet_rate_limit_rules_for_policy_with_filters(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosPacketRateLimitRule.'
'get_objects') as get_objects_mock:
filters = {'filter': 'filter_id'}
self.qos_plugin.get_policy_packet_rate_limit_rules(
self.ctxt, self.policy.id, filters=filters)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY,
qos_policy_id=self.policy.id,
filter='filter_id')
def test_get_policy_packet_rate_limit_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_packet_rate_limit_rule,
self.ctxt, self.pps_rule.id, self.policy.id)
def test_get_policy_packet_rate_limit_rules_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_packet_rate_limit_rules,
self.ctxt, self.policy.id)
def test_create_policy_pps_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.create_policy_packet_rate_limit_rule,
self.ctxt, self.policy.id, self.rule_data)
def test_update_policy_pps_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.update_policy_packet_rate_limit_rule,
self.ctxt, self.pps_rule.id, self.policy.id, self.rule_data)
def test_delete_policy_pps_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.delete_policy_packet_rate_limit_rule,
self.ctxt, self.pps_rule.id, self.policy.id)
def test_get_pps_rule_type(self):
admin_ctxt = context.get_admin_context()
drivers_details = [{
'name': 'fake-driver',
'supported_parameters': [{
'parameter_name': 'max_kpps',
'parameter_type': lib_constants.VALUES_TYPE_RANGE,
'parameter_range': {'start': 0, 'end': 100}
}]
}]
with mock.patch.object(
qos_plugin.QoSPlugin, "supported_rule_type_details",
return_value=drivers_details
):
rule_type_details = self.qos_plugin.get_rule_type(
admin_ctxt, qos_consts.RULE_TYPE_PACKET_RATE_LIMIT)
self.assertEqual(
qos_consts.RULE_TYPE_PACKET_RATE_LIMIT,
rule_type_details['type'])
self.assertEqual(
drivers_details, rule_type_details['drivers'])
def test_get_pps_rule_type_as_user(self):
self.assertRaises(
lib_exc.NotAuthorized,
self.qos_plugin.get_rule_type,
self.ctxt, qos_consts.RULE_TYPE_PACKET_RATE_LIMIT)
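# The next two cases check that creating a minimum packet rate rule is rejected
# (NotImplementedError) while the policy is applied to a bound port
# (device_owner set), but is allowed when the port is not bound to any device.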
def test_create_min_pps_rule_on_bound_port(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_pps_rule])
segment = network_object.NetworkSegment(
physical_network='fake physnet')
net = network_object.Network(
self.ctxt,
segments=[segment])
port = ports_object.Port(
self.ctxt,
id=uuidutils.generate_uuid(),
network_id=uuidutils.generate_uuid(),
device_owner='compute:fake-zone')
with mock.patch(
'neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy), \
mock.patch(
'neutron.objects.network.Network.get_object',
return_value=net), \
mock.patch.object(
self.qos_plugin,
'_get_ports_with_policy',
return_value=[port]):
self.assertRaises(
NotImplementedError,
self.qos_plugin.create_policy_minimum_packet_rate_rule,
self.ctxt, _policy.id, self.rule_data)
def test_create_min_pps_rule_on_unbound_port(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_pps_rule])
segment = network_object.NetworkSegment(
physical_network='fake physnet')
net = network_object.Network(
self.ctxt,
segments=[segment])
port = ports_object.Port(
self.ctxt,
id=uuidutils.generate_uuid(),
network_id=uuidutils.generate_uuid(),
device_owner='')
with mock.patch(
'neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy), \
mock.patch(
'neutron.objects.network.Network.get_object',
return_value=net), \
mock.patch.object(
self.qos_plugin,
'_get_ports_with_policy',
return_value=[port]):
try:
self.qos_plugin.create_policy_minimum_packet_rate_rule(
self.ctxt, _policy.id, self.rule_data)
except NotImplementedError:
self.fail()
def test_create_policy_rule_check_rule_min_pps_direction_conflict(self):
_policy = self._get_policy()
self.rule_data['minimum_packet_rate_rule']['direction'] = 'any'
setattr(_policy, "rules", [self.min_pps_rule])
rules = [
{
'minimum_packet_rate_rule': {
'id': uuidutils.generate_uuid(),
'min_kpps': 10,
'direction': 'ingress'
}
},
{
'minimum_packet_rate_rule': {
'id': uuidutils.generate_uuid(),
'min_kpps': 10,
'direction': 'egress'
}
},
]
for new_rule_data in rules:
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.assertRaises(qos_exc.QoSRuleParameterConflict,
self.qos_plugin.create_policy_minimum_packet_rate_rule,
self.ctxt, self.policy.id, new_rule_data)
mock_qos_get_obj.assert_called_once_with(self.ctxt,
id=_policy.id)
for rule_data in rules:
min_pps_rule = rule_object.QosMinimumPacketRateRule(
self.ctxt, **rule_data['minimum_packet_rate_rule'])
setattr(_policy, "rules", [min_pps_rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.assertRaises(qos_exc.QoSRuleParameterConflict,
self.qos_plugin.create_policy_minimum_packet_rate_rule,
self.ctxt, self.policy.id, self.rule_data)
mock_qos_get_obj.assert_called_once_with(self.ctxt,
id=_policy.id)
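# The direction-conflict cases above and below encode the rule that a minimum
# packet rate rule with direction 'any' cannot coexist on a policy with
# ingress or egress rules, and vice versa.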
def test_create_policy_min_pps_rule(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_pps_rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.qos_plugin.create_policy_minimum_packet_rate_rule(
self.ctxt, self.policy.id, self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
def test_create_policy_min_pps_rule_duplicates(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_pps_rule])
new_rule_data = {
'minimum_packet_rate_rule': {
'id': uuidutils.generate_uuid(),
'min_kpps': 1234,
'direction': self.min_pps_rule.direction,
},
}
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.assertRaises(
qos_exc.QoSRulesConflict,
self.qos_plugin.create_policy_minimum_packet_rate_rule,
self.ctxt, _policy.id, new_rule_data)
mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id)
def test_create_policy_min_pps_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.create_policy_minimum_packet_rate_rule,
self.ctxt, self.policy.id, self.rule_data)
def test_update_policy_min_pps_rule(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_pps_rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.qos_plugin.update_policy_minimum_packet_rate_rule(
self.ctxt, self.min_pps_rule.id, self.policy.id,
self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
def test_update_policy_rule_check_rule_min_pps_direction_conflict(self):
_policy = self._get_policy()
rules_data = [
{
'minimum_packet_rate_rule': {
'id': uuidutils.generate_uuid(),
'min_kpps': 10,
'direction': 'ingress'
}
},
{
'minimum_packet_rate_rule': {
'id': uuidutils.generate_uuid(),
'min_kpps': 10,
'direction': 'egress'
}
},
]
self.rule_data['minimum_packet_rate_rule']['direction'] = 'any'
for rule_data in rules_data:
rules = [
rule_object.QosMinimumPacketRateRule(
self.ctxt, **rules_data[0]['minimum_packet_rate_rule']),
rule_object.QosMinimumPacketRateRule(
self.ctxt, **rules_data[1]['minimum_packet_rate_rule']),
]
setattr(_policy, 'rules', rules)
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.assertRaises(qos_exc.QoSRuleParameterConflict,
self.qos_plugin.update_policy_minimum_packet_rate_rule,
self.ctxt, rule_data['minimum_packet_rate_rule']['id'],
self.policy.id, self.rule_data)
mock_qos_get_obj.assert_called_once_with(self.ctxt,
id=_policy.id)
def test_update_policy_min_pps_rule_bad_policy(self):
_policy = self._get_policy()
setattr(_policy, "rules", [])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.assertRaises(
qos_exc.QosRuleNotFound,
self.qos_plugin.update_policy_minimum_packet_rate_rule,
self.ctxt, self.min_pps_rule.id, self.policy.id,
self.rule_data)
def test_update_policy_min_pps_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.update_policy_minimum_packet_rate_rule,
self.ctxt, self.min_pps_rule.id, self.policy.id,
self.rule_data)
def test_delete_policy_min_pps_rule(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_pps_rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.qos_plugin.delete_policy_minimum_packet_rate_rule(
self.ctxt, self.min_pps_rule.id, self.policy.id)
self._validate_driver_params('update_policy', self.ctxt)
def test_delete_policy_min_pps_rule_bad_policy(self):
_policy = self._get_policy()
setattr(_policy, "rules", [])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.assertRaises(
qos_exc.QosRuleNotFound,
self.qos_plugin.delete_policy_minimum_packet_rate_rule,
self.ctxt, self.min_pps_rule.id, _policy.id)
def test_delete_policy_min_pps_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.delete_policy_minimum_packet_rate_rule,
self.ctxt, self.min_pps_rule.id, self.policy.id)
def test_get_policy_min_pps_rule(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosMinimumPacketRateRule.'
'get_object') as get_object_mock:
self.qos_plugin.get_policy_minimum_packet_rate_rule(
self.ctxt, self.min_pps_rule.id, self.policy.id)
get_object_mock.assert_called_once_with(
self.ctxt, id=self.min_pps_rule.id)
def test_get_policy_min_pps_rules_for_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosMinimumPacketRateRule.'
'get_objects') as get_objects_mock:
self.qos_plugin.get_policy_minimum_packet_rate_rules(
self.ctxt, self.policy.id)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY, qos_policy_id=self.policy.id)
def test_get_policy_min_pps_rules_for_policy_with_filters(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosMinimumPacketRateRule.'
'get_objects') as get_objects_mock:
filters = {'filter': 'filter_id'}
self.qos_plugin.get_policy_minimum_packet_rate_rules(
self.ctxt, self.policy.id, filters=filters)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY,
qos_policy_id=self.policy.id,
filter='filter_id')
def test_get_policy_min_pps_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_minimum_packet_rate_rule,
self.ctxt, self.min_pps_rule.id, self.policy.id)
def test_get_policy_min_pps_rules_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_minimum_packet_rate_rules,
self.ctxt, self.policy.id)
def test_get_min_pps_rule_type(self):
admin_ctxt = context.get_admin_context()
drivers_details = [{
'name': 'fake-driver',
'supported_parameters': [{
'parameter_name': 'min_kpps',
'parameter_type': lib_constants.VALUES_TYPE_RANGE,
'parameter_range': {'start': 0, 'end': 100}
}]
}]
with mock.patch.object(
qos_plugin.QoSPlugin, "supported_rule_type_details",
return_value=drivers_details
):
rule_type_details = self.qos_plugin.get_rule_type(
admin_ctxt, qos_consts.RULE_TYPE_MINIMUM_PACKET_RATE)
self.assertEqual(
qos_consts.RULE_TYPE_MINIMUM_PACKET_RATE,
rule_type_details['type'])
self.assertEqual(
drivers_details, rule_type_details['drivers'])
def test_get_min_pps_rule_type_as_user(self):
self.assertRaises(
lib_exc.NotAuthorized,
self.qos_plugin.get_rule_type,
self.ctxt, qos_consts.RULE_TYPE_MINIMUM_PACKET_RATE)
class QoSRuleAliasTestExtensionManager(object):
def get_resources(self):
return qos_rules_alias.Qos_rules_alias.get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
class QoSRuleAliasMinimumPacketRateTestExtensionManager(object):
def get_resources(self):
return qos_pps_minimum_rule_alias.Qos_pps_minimum_rule_alias.\
get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
class TestQoSRuleAlias(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def setUp(self):
# Remove MissingAuthPlugin exception from logs
self.patch_notifier = mock.patch(
'neutron.notifiers.batch_notifier.BatchNotifier._notify')
self.patch_notifier.start()
plugin = 'ml2'
service_plugins = {'qos_plugin_name': SERVICE_PLUGIN_KLASS}
ext_mgr = QoSRuleAliasTestExtensionManager()
super(TestQoSRuleAlias, self).setUp(plugin=plugin, ext_mgr=ext_mgr,
service_plugins=service_plugins)
self.qos_plugin = directory.get_plugin(plugins_constants.QOS)
self.ctxt = context.Context('fake_user', 'fake_tenant')
self.rule_objects = {
'bandwidth_limit': rule_object.QosBandwidthLimitRule,
'dscp_marking': rule_object.QosDscpMarkingRule,
'minimum_bandwidth': rule_object.QosMinimumBandwidthRule
}
self.qos_policy_id = uuidutils.generate_uuid()
self.rule_data = {
'bandwidth_limit_rule': {'max_kbps': 100,
'max_burst_kbps': 150},
'dscp_marking_rule': {'dscp_mark': 16},
'minimum_bandwidth_rule': {'min_kbps': 10}
}
def _update_rule(self, rule_type, rule_id, **kwargs):
data = {'alias_%s_rule' % rule_type: kwargs}
resource = '%s/alias-%s-rules' % (qos.ALIAS,
rule_type.replace('_', '-'))
request = self.new_update_request(resource, data, rule_id, self.fmt)
res = request.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(self.fmt, res)
def _show_rule(self, rule_type, rule_id):
resource = '%s/alias-%s-rules' % (qos.ALIAS,
rule_type.replace('_', '-'))
request = self.new_show_request(resource, rule_id, self.fmt)
res = request.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(self.fmt, res)
def _delete_rule(self, rule_type, rule_id):
resource = '%s/alias-%s-rules' % (qos.ALIAS,
rule_type.replace('_', '-'))
request = self.new_delete_request(resource, rule_id, self.fmt)
res = request.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
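# Note on the alias resource paths built by the helpers above: for a rule type
# such as 'minimum_bandwidth' the request URL component becomes
# '<qos.ALIAS>/alias-minimum-bandwidth-rules' (underscores turned into hyphens).
# Illustrative sketch only -- the actual prefix depends on the value of qos.ALIAS:
#
#   resource = '%s/alias-%s-rules' % (qos.ALIAS, 'minimum_bandwidth'.replace('_', '-'))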
@mock.patch.object(qos_plugin.QoSPlugin, "update_policy_rule")
def test_update_rule(self, update_policy_rule_mock):
calls = []
for rule_type, rule_object_class in self.rule_objects.items():
rule_id = uuidutils.generate_uuid()
rule_data_name = '%s_rule' % rule_type
data = self.rule_data[rule_data_name]
rule = rule_object_class(self.ctxt, id=rule_id,
qos_policy_id=self.qos_policy_id,
**data)
with mock.patch(
'neutron.objects.qos.rule.QosRule.get_object',
return_value=rule
), mock.patch.object(self.qos_plugin, 'get_policy_rule',
return_value=rule.to_dict()):
self._update_rule(rule_type, rule_id, **data)
calls.append(mock.call(mock.ANY, rule_object_class, rule_id,
self.qos_policy_id, {rule_data_name: data}))
update_policy_rule_mock.assert_has_calls(calls, any_order=True)
@mock.patch.object(qos_plugin.QoSPlugin, "get_policy_rule")
def test_show_rule(self, get_policy_rule_mock):
calls = []
for rule_type, rule_object_class in self.rule_objects.items():
rule_id = uuidutils.generate_uuid()
rule_data_name = '%s_rule' % rule_type
data = self.rule_data[rule_data_name]
rule = rule_object_class(self.ctxt, id=rule_id,
qos_policy_id=self.qos_policy_id,
**data)
with mock.patch('neutron.objects.qos.rule.QosRule.get_object',
return_value=rule):
self._show_rule(rule_type, rule_id)
calls.append(mock.call(mock.ANY, rule_object_class, rule_id,
self.qos_policy_id))
get_policy_rule_mock.assert_has_calls(calls, any_order=True)
@mock.patch.object(qos_plugin.QoSPlugin, "delete_policy_rule")
def test_delete_rule(self, delete_policy_rule_mock):
calls = []
for rule_type, rule_object_class in self.rule_objects.items():
rule_id = uuidutils.generate_uuid()
rule_data_name = '%s_rule' % rule_type
data = self.rule_data[rule_data_name]
rule = rule_object_class(self.ctxt, id=rule_id,
qos_policy_id=self.qos_policy_id,
**data)
with mock.patch(
'neutron.objects.qos.rule.QosRule.get_object',
return_value=rule
), mock.patch.object(self.qos_plugin, 'get_policy_rule',
return_value=rule.to_dict()):
self._delete_rule(rule_type, rule_id)
calls.append(mock.call(mock.ANY, rule_object_class, rule_id,
self.qos_policy_id))
delete_policy_rule_mock.assert_has_calls(calls, any_order=True)
def test_show_non_existing_rule(self):
for rule_type, rule_object_class in self.rule_objects.items():
rule_id = uuidutils.generate_uuid()
with mock.patch('neutron.objects.qos.rule.QosRule.get_object',
return_value=None):
resource = '%s/alias-%s-rules' % (qos.ALIAS,
rule_type.replace('_', '-'))
request = self.new_show_request(resource, rule_id, self.fmt)
res = request.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)
class TestQoSRuleAliasMinimumPacketRate(TestQoSRuleAlias):
def setUp(self):
# Remove MissingAuthPlugin exception from logs
self.patch_notifier = mock.patch(
'neutron.notifiers.batch_notifier.BatchNotifier._notify')
self.patch_notifier.start()
plugin = 'ml2'
service_plugins = {'qos_plugin_name': SERVICE_PLUGIN_KLASS}
ext_mgr = QoSRuleAliasMinimumPacketRateTestExtensionManager()
super(TestQoSRuleAlias, self).setUp(plugin=plugin, ext_mgr=ext_mgr,
service_plugins=service_plugins)
self.qos_plugin = directory.get_plugin(plugins_constants.QOS)
self.ctxt = context.Context('fake_user', 'fake_tenant')
self.rule_objects = {
'minimum_packet_rate': rule_object.QosMinimumPacketRateRule
}
self.qos_policy_id = uuidutils.generate_uuid()
self.rule_data = {
'minimum_packet_rate_rule': {'min_kpps': 10, 'direction': 'any'}
}
class TestQosPluginDB(base.BaseQosTestCase):
PORT_ID = 'f02f160e-1612-11ec-b2b8-bf60ab98186c'
QOS_MIN_BW_RULE_ID = '8bf8eb46-160e-11ec-8024-9f96be32099d'
# uuid -v5 f02f160e-1612-11ec-b2b8-bf60ab98186c
# 8bf8eb46-160e-11ec-8024-9f96be32099d
MIN_BW_REQUEST_GROUP_UUID = 'c8bc1b27-59a1-5135-aa33-aeecad6093f4'
MIN_BW_RP = 'd7bea120-1626-11ec-9148-c32debfcf0f6'
QOS_MIN_PPS_RULE_ID = '6ac5db7e-1626-11ec-8c7f-0b70dbb8a8eb'
# uuid -v5 f02f160e-1612-11ec-b2b8-bf60ab98186c
# 6ac5db7e-1626-11ec-8c7f-0b70dbb8a8eb
MIN_PPS_REQUEST_GROUP_UUID = '995008f4-f120-547a-b051-428b89076067'
MIN_PPS_RP = 'e16161f4-1626-11ec-a5a2-1fc9396e27cc'
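# The two *_REQUEST_GROUP_UUID constants above follow the 'uuid -v5 <port> <rule>'
# recipe noted in the comments: a version-5 UUID computed with the port ID as the
# namespace and the rule ID as the name. A rough Python equivalent (illustrative
# only, not used by the tests):
#
#   import uuid
#   uuid.uuid5(uuid.UUID(PORT_ID), QOS_MIN_BW_RULE_ID)   # expected: MIN_BW_REQUEST_GROUP_UUID
#   uuid.uuid5(uuid.UUID(PORT_ID), QOS_MIN_PPS_RULE_ID)  # expected: MIN_PPS_REQUEST_GROUP_UUID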
def setUp(self):
super(TestQosPluginDB, self).setUp()
self.setup_coreplugin(load_plugins=False)
cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS)
cfg.CONF.set_override("service_plugins", ["qos"])
manager.init()
self.qos_plugin = directory.get_plugin(plugins_constants.QOS)
self.qos_plugin.driver_manager = mock.Mock()
self.rpc_push = mock.patch('neutron.api.rpc.handlers.resources_rpc'
'.ResourcesPushRpcApi.push').start()
self.context = context.get_admin_context()
self.project_id = uuidutils.generate_uuid()
def _make_qos_policy(self):
qos_policy = policy_object.QosPolicy(
self.context, project_id=self.project_id, shared=False,
is_default=False)
qos_policy.create()
return qos_policy
def _make_qos_minbw_rule(self, policy_id, direction='ingress',
min_kbps=1000, rule_id=None):
rule_id = rule_id if rule_id else uuidutils.generate_uuid()
qos_rule = rule_object.QosMinimumBandwidthRule(
self.context, project_id=self.project_id,
qos_policy_id=policy_id, direction=direction, min_kbps=min_kbps,
id=rule_id)
qos_rule.create()
return qos_rule
def _make_qos_minpps_rule(self, policy_id, direction='ingress',
min_kpps=1000, rule_id=None):
rule_id = rule_id if rule_id else uuidutils.generate_uuid()
qos_rule = rule_object.QosMinimumPacketRateRule(
self.context, project_id=self.project_id,
qos_policy_id=policy_id, direction=direction, min_kpps=min_kpps,
id=rule_id)
qos_rule.create()
return qos_rule
def _make_port(self, network_id, qos_policy_id=None, port_id=None,
qos_network_policy_id=None, device_owner=None):
port_id = port_id if port_id else uuidutils.generate_uuid()
base_mac = ['aa', 'bb', 'cc', 'dd', 'ee', 'ff']
mac = netaddr.EUI(next(net_utils.random_mac_generator(base_mac)))
device_owner = device_owner if device_owner else '3'
port | |
# main.py
import os
import random
from csv import reader
from json import dumps, load, loads, dump
from os import getpid
from sys import argv
from threading import Thread, current_thread
from time import sleep, time
from PyQt5.QtGui import QFontDatabase
import psutil
from PyQt5 import QtCore, QtWidgets
from PyQt5.Qt import QApplication, QMainWindow, QWidget, QWindow, QFormLayout
from PyQt5.QtCore import QProcess
from selenium import webdriver
from win32 import win32process
from win32gui import FindWindow
from PyQt5.QtWidgets import QComboBox, QDialog, QDialogButtonBox, QFileDialog, QHBoxLayout, QMessageBox, QTreeWidgetItem, QVBoxLayout
from PyQt5.Qt import QWindow, QLabel, QLineEdit
from extension import Element, Page, subClass, Struct, sequentialStruct, Field
from Ui_main import Ui_MainWindow
from utils import receipt, stop_thread
_translate = QtCore.QCoreApplication.translate
class QD(QDialog):
"""
QD: generic form dialog.
Builds an input form from the field definitions of `fparent` and calls
`accepted` when the user confirms the dialog.
"""
def __init__(self,
fparent,
parent=None,
title="",
accepted=lambda x: print('accepted')):
super().__init__(parent)
self.fparent = fparent
self.setWindowModality(QtCore.Qt.WindowModal)
if title:
self.setWindowTitle(title)
self.accepted = accepted
def show(self):
layout = QFormLayout(self)
for n, f in self.fparent.fields.items():
layout.addRow(QLabel(n), f.askQt(self))
buttonBox = QDialogButtonBox()
buttonBox.setOrientation(QtCore.Qt.Horizontal)  # lay the buttons out horizontally
buttonBox.setStandardButtons(QDialogButtonBox.Ok
| QDialogButtonBox.Cancel)
layout.addRow(buttonBox)
buttonBox.accepted.connect(lambda x=self: self.accepted(x))  # OK button
buttonBox.rejected.connect(self.close)
self.setLayout(layout)
super().show()
class ElementQd(QDialog):
def __init__(self,
item,
title,
parent=None,
accepted=lambda x: print('accepted')):
super().__init__(parent)
self.setWindowModality(QtCore.Qt.WindowModal)
if title:
self.setWindowTitle(title)
self.accepted = accepted
self.item = item
def show(self):
qb = QComboBox(self)
tag = self.item.data['localName']
elements = self.parent().tags[tag]
for element in elements:
qb.addItem(element.name())
self.qb = qb
buttonBox = QDialogButtonBox()
buttonBox.setOrientation(QtCore.Qt.Horizontal)  # lay the buttons out horizontally
buttonBox.setStandardButtons(QDialogButtonBox.Ok
| QDialogButtonBox.Cancel)
buttonBox.accepted.connect(self.pull)  # OK button
buttonBox.rejected.connect(self.close)
layout = QVBoxLayout(self)
layout.addWidget(self.qb)
layout.addWidget(buttonBox)
super().show()
def pull(self):
currentIndex = self.qb.currentIndex()
tag = self.item.data['localName']
currentElement = self.parent().tags[tag][currentIndex]
self.qd = QD(currentElement,
self,
'Add ELement',
lambda x=self: self.accepted(x))
self.qd.data = self.item.data
self.qd.show()
class MainWindow(QMainWindow, Ui_MainWindow):
def __init__(self):
super().__init__()
self.bindJs = self.readJs()
self.setupUi(self)
def setupUi(self, MainWindow):
"""注册安装组件
"""
super().setupUi(MainWindow)
self.verifyDriver()
self.setupChrome()
self.setupExtension()
self.setupTimer()
self.selectDemoButton.clicked.connect(self.selectDemo)
self.saveDemoButton.clicked.connect(self.saveDemo)
self.selectTaskButton.clicked.connect(self.selectTask)
self.pauseTaskButton.clicked.connect(self.pauseTask)
self.startTaskButton.clicked.connect(self.startTask)
self.subClass = {
'Struct': self.subStruct,
'Element': self.subElement,
'Page': self.subPage
}
self.chromeTask = None
self.threadTask = None
self.isworking = False
# TODO
# self.rightTree.itemDoubleClicked.connect(self)
print()
def verifyDriver(self):
"""
TODO: verify that a suitable chromedriver is installed, or download one
"""
pass
def setupExtension(self):
"""
Install the Struct / Page / Element extensions.
"""
self.setupStructExtension()
self.setupPageExtension()
self.setupElementExtension()
self.actionBind.triggered.connect(lambda x: self.addBinds())
self.actionRemoveElements.triggered.connect(
lambda: self.removeElements())
self.rightTree.expandAll()
def setupStructExtension(self):
"""
"""
print(subClass(Struct))
self.subStruct = dict()
self.structMenu.clear()
while self.structTree.topLevelItem(0):
self.structTree.takeTopLevelItem(0)
for ss in subClass(Struct):
if ss is not sequentialStruct:
action = QtWidgets.QAction(self)
action.setCheckable(True)
action.setObjectName('structSelect' + ss.name())
action.setText(_translate("MainWindow", ss.name()))
action.triggered.connect(
lambda checked, ss=ss: self.setupStruct(ss))
self.structMenu.addAction(action)
self.subStruct[ss.name()] = {
'class_': ss,
'action': action,
'load': False
}
if ss.autoLoad:
self.setupStruct(ss)
else:
self.subStruct[ss.name()] = {'class_': ss, 'load': True}
self.structTree.itemDoubleClicked.connect(self.addStruct)
self.structTree.expandAll()
self.structDemo = sequentialStruct()
self.structDemo.load({})
item = self.itemWithField(self.rightTree, self.structDemo)
self.structDemo.setItem(item)
self.itemDemo = item
def setupStruct(self, ss):
"""
setupStruct
"""
print('addPageExtension', ss.name())
ssdict = self.subStruct[ss.name()]
ssdict['load'] = not ssdict['load']
action = ssdict['action']
action.setChecked(ssdict['load'])
if ssdict['load']:
item = self.itemWithField(self.structTree, ss)
item.class_ = ss
else:
i = 0
while self.structTree.topLevelItem(i):
if self.structTree.topLevelItem(i).text(0) == ss.name():
self.structTree.takeTopLevelItem(i)
self.structTree.update()
return
else:
i += 1
def addStruct(self, item, column):
"""
addStruct
"""
while not hasattr(item, 'class_'):
item = item.parent()
# if issubclass(item.class_, (Struct)):
qd = QD(item.class_, self, 'AddStruct', self.addStructGui)
qd.show()
def addStructGui(self, qd: QD):
"""
AddStructGui
"""
tmp = []
i = 0
for i in range(qd.layout().count()):
item = qd.layout().itemAt(i).widget()
if isinstance(item, QLabel):
continue
tmp.append(item)
tmp = tmp[:len(qd.fparent.fields)]
fields = {}
for (fn, f), q in zip(qd.fparent.fields.items(), tmp):
r, fields[fn] = f.loads(q)
if not r:
QMessageBox.information(
qd, 'add Page wrong',
f'{fn} has a wrong input!\nTry it again.', QMessageBox.Ok)
return
qd.close()
self.addStructJson(qd.fparent, fields)
def addStructJson(self, ss, fields):
"""
AddStructjson
"""
struct = ss()
struct.load(fields)
self.structDemo.add(struct)
item = self.itemWithField(self.itemDemo, struct, fields)
item.class_ = struct
struct.setItem(item)
self.structDemo = struct
self.itemDemo = item
def setupPageExtension(self):
"""
Install the Page extensions.
"""
print(subClass(Page))
column = 0
self.subPage = dict()
self.pageMenu.clear()
while self.pageTree.topLevelItem(0):
self.pageTree.takeTopLevelItem(0)
for sp in subClass(Page):
action = QtWidgets.QAction(self)
action.setCheckable(True)
action.setObjectName('pageSelect' + sp.name())
action.setText(_translate("MainWindow", sp.name()))
action.triggered.connect(lambda checked, sp=sp: self.setupPage(sp))
self.pageMenu.addAction(action)
self.subPage[sp.name()] = {
'class_': sp,
'action': action,
'load': False
}
if sp.autoLoad:
self.setupPage(sp)
self.pageTree.expandAll()
self.pageTree.itemDoubleClicked.connect(self.addPage)
def itemWithField(self, parent, class_, fields=None):
item = QTreeWidgetItem(parent)
item.setText(0, _translate('MainWindow', class_.name()))
item.class_ = class_
for fn in class_.fields:
tmp = QTreeWidgetItem(item)
if hasattr(class_, 'fields_'):
tmp.setText(
0, _translate('MainWindow', f'{fn}:{class_.fields_[fn]}'))
elif fields:
tmp.setText(0, _translate('MainWindow', f'{fn}:{fields[fn]}'))
else:
tmp.setText(0, _translate('MainWindow', f'{fn}'))
return item
def itemElement(self, parent, data, se=None, fields={}):
if se:
item = self.itemWithField(parent, se, fields)
item.setText(
0,
_translate('MainWindow',
f'{se.name()}:{data["seleniumXpath"]}'))
else:
item = QTreeWidgetItem(parent)
item.class_ = Element
item.setText(0, _translate('MainWindow', data['seleniumXpath']))
for n in ['localName', 'className']:
if n in data:
tmp = QTreeWidgetItem(item)
tmp.setText(0, _translate('MainWindow', f'{n}:{data[n]}'))
item.class_ = se
return item
def setupPage(self, sp: Page):
"""
Toggle (install or remove) a single Page extension.
Args:
sp (Page): the Page subclass to toggle
"""
print('addPageExtension', sp.name())
column = 1
spdict = self.subPage[sp.name()]
spdict['load'] = not spdict['load']
action = spdict['action']
action.setChecked(spdict['load'])
if spdict['load']:
item = self.itemWithField(self.pageTree, sp)
item.class_ = sp
else:
i = 0
while self.pageTree.topLevelItem(i):
if self.pageTree.topLevelItem(i).text(0) == sp.name():
self.pageTree.takeTopLevelItem(i)
self.pageTree.update()
return
else:
i += 1
def addPage(self, item, column):
"""
addPage
"""
while not hasattr(item, 'class_'):
item = item.parent()
qd = QD(item.class_, self, 'AddPage', self.addPageGui)
qd.show()
def addPageGui(self, qd: QD):
"""
Read the values entered in the dialog, validate them and create the Page.
Args:
qd (QD): the dialog holding the user's input
"""
tmp = []
i = 0
print(qd.layout().count())
for i in range(qd.layout().count()):
item = qd.layout().itemAt(i).widget()
if isinstance(item, QLabel):
continue
tmp.append(item)
tmp = tmp[:len(qd.fparent.fields)]
fields = {}
for (fn, f), q in zip(qd.fparent.fields.items(), tmp):
r, fields[fn] = f.loads(q)
if not r:
QMessageBox.information(
qd, 'add Page wrong',
f'{fn} has a wrong input!\nTry it again.', QMessageBox.Ok)
return
qd.close()
self.addPageJson(qd.fparent, fields)
def addPageJson(self, sp, fields):
"""
AddPageJson
"""
page = sp()
page.load(fields)
self.structDemo.add(page)
item = self.itemWithField(self.itemDemo, page, fields)
page.setItem(item)
item.class_ = page
def setupElementExtension(self):
"""
Install the Element extensions.
"""
column = 0
self.subElement = dict()
self.tags = {}
for se in subClass(Element):
action = QtWidgets.QAction(self)
action.setCheckable(True)
action.setObjectName('elementSelect' + se.name())
action.setText(_translate("MainWindow", se.name()))
action.triggered.connect(
lambda checked, se=se: self.setupElement(se))
print(action.triggered)
self.elementMenu.addAction(action)
self.subElement[se.name()] = {
'class_': se,
'action': action,
'load': False
}
if se.autoLoad:
self.setupElement(se)
self.elementTree.itemDoubleClicked.connect(self.addElement)
print(self.elementTree.itemPressed.connect(lambda x, y: print(x, y)))
def setupElement(self, se: Element):
"""
Toggle (install or remove) a single Element extension.
"""
print('setupElement', se.name())
column = 0
sedict = self.subElement[se.name()]
sedict['load'] = not sedict['load']
action = sedict['action']
action.setChecked(sedict['load'])
if sedict['load']:
for tag in se.tags:
if tag in self.tags:
self.tags[tag].append(se)
else:
self.tags[tag] = [se]
self.addBinds(tag)
else:
for tag in se.tags:
self.tags[tag].remove(se)
if not self.tags[tag]:
self.removeBinds(tag)
self.tags.pop(tag)
def addElement(self, item, column):
"""
addElement
"""
while not hasattr(item, 'class_'):
item = item.parent()
qd = ElementQd(item, 'AddElement', self, self.addElementGui)
qd.show()
def removeElement(self, item, column):
while not hasattr(item, 'class_'):
item = item.parent()
i = 0
tmp = self.elementTree.topLevelItem(i)
while tmp:
if tmp is item:
self.elementTree.takeTopLevelItem(i)
self.elements.pop(item.data['seleniumXpath'])
return
i += 1
tmp = self.elementTree.topLevelItem(i)
def removeElements(self):
self.elements = {}
while self.elementTree.topLevelItem(0):
self.elementTree.takeTopLevelItem(0)
def removeAllElement(self):
item = self.leftTree.topLevelItem(0)
while item:
self.removeElement(item, 0)
item = self.leftTree.topLevelItem(0)
def addElementGui(self, qd: QD):
"""
Read the values entered in the dialog, validate them and create the Element.
Args:
qd (QD): the dialog holding the user's input
"""
tmp = []
i = 0
print(qd.layout().count())
for i in range(qd.layout().count()):
item = qd.layout().itemAt(i).widget()
if isinstance(item, QLabel):
continue
tmp.append(item)
tmp = tmp[:len(qd.fparent.fields)]
fields = {}
for (fn, f), q in zip(qd.fparent.fields.items(), tmp):
r, fields[fn] = f.loads(q)
if not r:
QMessageBox.information(
qd, 'add Page wrong',
f'{fn} has a wrong input!\nTry it again.', QMessageBox.Ok)
return
qd.parent().close()
self.removeElement(qd.parent().item, 0)
qd.close()
self.addElementJson(
qd.fparent,
qd.data,
fields,
)
def addElementJson(self, se, data, fields):
element = se()
element.load(data, fields)
self.structDemo.add(element)
# item = self.itemWithField(self.itemDemo, element, fields)
item = self.itemElement(self.itemDemo, data, element, fields)
item.class_ = element
element.setItem(item)
return
def setupChrome(self):
"""打开并连接chrome
"""
options = webdriver.ChromeOptions()
options.add_argument('--ignore-certificate-errors')
options.add_experimental_option(
"excludeSwitches", ['enable-automation', 'enable-logging'])
try:
self.chrome = webdriver.Chrome(options=options, keep_alive=True)
except Exception as e:
print(e)
cwid = 0
while not cwid:
if not self.chrome.title:
cwid = FindWindow(None,
self.chrome.current_url + ' - Google Chrome')
else:
cwid = FindWindow(None, self.chrome.title + ' - Google Chrome')
pWin = QWindow.fromWinId(cwid)
pWid = QWidget.createWindowContainer(pWin, self)
layout = QtWidgets.QHBoxLayout()
layout.addWidget(pWid)
# self.chrome.get('https://www.baidu.com')
self.middle.setLayout(layout)
self.middle.update()
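# A note on the embedding trick used in setupChrome above: Selenium launches a
# real Chrome window, FindWindow looks that window up by its native title
# ('<page title> - Google Chrome'), QWindow.fromWinId wraps the native handle,
# and createWindowContainer turns it into an ordinary QWidget that can be placed
# in self.middle. The loop simply polls until a non-zero window handle is found.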
def readJs(self, path='utils/script.js'):
"""读取绑定Js
"""
with open(path, 'r', encoding='utf-8') as f:
return f.read()
def addBinds(self, tag=''):
# self.chrome.execute_script(self.bindJs)
print('addBinds', tag)
if tag:
self.chrome.execute_script(f'{self.bindJs};\nadd_binds("{tag}");')
else:
for tag in self.tags:
self.addBinds(tag)
def removeBinds(self, tag):
self.chrome.execute_script(f'{self.bindJs};\nremove_binds("{tag}");')
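# addBinds/removeBinds above re-send the full contents of utils/script.js on every
# call and then invoke add_binds(tag) / remove_binds(tag), which are assumed to be
# functions defined inside that script. Re-injecting the script each time (rather
# than once at startup) presumably keeps the helpers available after the embedded
# Chrome navigates to a new page, since injected scripts do not survive a
# document change.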
def setupTimer(self):
self.elements = {}
self.pollingTimer = QtCore.QTimer(self)
self.pollingTimer.timeout.connect(self.polling)
self.pollingTimer.start(1000)
def polling(self):
try:
for r in receipt['data']:
if r['seleniumXpath'] not in self.elements:
self.elements[r['seleniumXpath']] = r
item = self.itemElement(self.elementTree, r)
item.data = r
receipt['data'].remove(r)
except Exception as e:
print(e)
def saveDemo(self):
"""
saveDemo
"""
saveFile = QFileDialog.getSaveFileName(self, 'Select a JSON file', './.',
'All(*.*)')[0]
item = self.rightTree.topLevelItem(0)
struct = item.class_
if saveFile:
try:
with open(saveFile, 'w', encoding='utf-8') as f:
dump(struct.dumps(), f)
except Exception as e:
print(e)
def selectTask(self):
readFile = QFileDialog.getOpenFileName(self, 'Select a JSON file', './.',
'All(*.*)')[0]
with open(readFile, 'r', encoding='utf-8') as f:
data = | |
#Linear SVM for Two-class Problem
#.......................................................................IMPORTS
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
from sklearn.svm import LinearSVC
from sklearn.metrics import confusion_matrix
from sklearn import svm
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
#.....................................................................CONSTANTS
C_list = [0.1, 1 , 10 , 100]
C_best = 1
#.....................................................................FUNCTIONS
def get_classA_dataset():
'''Reads in dataset 1'''
os.chdir(r'\Data')
df = pd.read_csv('classA.csv', delimiter=',', header = None)
df.columns=['x1','x2']
return df
def get_classB_dataset():
'''Reads in dataset 2'''
os.chdir('')
df = pd.read_csv('classB.csv', delimiter=',', header = None)
df.columns=['x1','x2']
return df
def plot_boundary(model, dataframe, label):
'''Plots the SVM linear line from the fitted model'''
w = model.coef_[0]
a = -w[0] / w[1]
xx = np.linspace((dataframe.iloc[:,0].values).min(),(dataframe.iloc[:,0].values).max())
yy = a * xx - model.intercept_[0] / w[1]
plt.plot(xx, yy, label = label)
return
def plot_boundary_2(model, x_min, x_max, label):
'''Plots the SVM linear line from the fitted model'''
w = model.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(x_min,x_max)
yy = a * xx - model.intercept_[0] / w[1]
plt.plot(xx, yy, label = label)
return
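# The two helpers above draw the separating line of a fitted linear SVM. With
# weights w = (w0, w1) and intercept b, the boundary w0*x1 + w1*x2 + b = 0
# rearranges to x2 = -(w0/w1)*x1 - b/w1, which is exactly the slope `a` and the
# offset used to compute `yy`. Quick numeric check (illustrative values only):
#
#   w0, w1, b = 0.5, -1.0, 2.0
#   a = -w0 / w1              # 0.5
#   x2 = a * 3.0 - b / w1     # 1.5 - (-2.0) = 3.5 at x1 = 3.0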
def SVM_classifier(C_list, dataframe):
'''Classifies the dataframe using a fitted SVM model with the C values in C_list, plots the decision boundary of the varying C values'''
X_train = dataframe.iloc[:,0:2].values
y_train = dataframe.iloc[:,2].values
df_0 = dataframe[dataframe['class'] == 0]
df_1 = dataframe[dataframe['class'] == 1]
plt.scatter(df_0.iloc[:,0].values, df_0.iloc[:,1].values, label = 'Class A')
plt.scatter(df_1.iloc[:,0].values, df_1.iloc[:,1].values, label = 'Class B')
plt.xlabel('x1')
plt.ylabel('x2')
for C_i in C_list:
#svc = LinearSVC(C = C_i, max_iter = 200000)
svc = svm.SVC(kernel='linear', C = C_i) # linear SVM
svc.fit(X_train, y_train)
plot_boundary_2(svc, 180, 425, C_i)
plt.legend()
plt.savefig('Q2_part2.png')
plt.show()
return
def accuracy(y_true, y_predicted):
'''Reports the accuracy of two lists of true and predicted values'''
correct = 0
for true, pred in zip(y_true, y_predicted):
if float(true) == float(pred):
correct += 1
accuracy = correct/len(y_predicted)*100
print('Accuracy of classifier {:0.2f} %'.format(accuracy))
return accuracy
def fold_cross_val(dataframe, num_folds, C):
'''10 fold cross validation'''
#Scaled features for faster processing
scaled_features = StandardScaler().fit_transform(dataframe.values)
scaled_features_df = pd.DataFrame(scaled_features, index=dataframe.index, columns=dataframe.columns)
scaled_features_df['class'] = dataframe['class']
#Shuffle Dataframe
df_shuffle = scaled_features_df.iloc[np.random.permutation(len(scaled_features_df))]
df_shuffle = df_shuffle.reset_index(drop=True) #Reset the index to begin at 0
folds = num_folds #Calls number of folds
fold_size = int(df_shuffle.shape[0]/folds) # Determines the size of the folds
accuracy_list = [] #makes empty list to store accuracy values
#y_pred_master = []
#y_test_master = []
start = 0 # initialize the start
end = fold_size # initialize the end
for i in range(folds):
print('\t Calculating fold number {} of {} folds...'.format(i+1, folds))
#If the final fold would leave a sliver of data, extend the test fold to absorb the extra rows.
len_dataframe = len(df_shuffle)
if (len_dataframe - end) < fold_size:
end = len_dataframe
#Test Dataframe
df_test = df_shuffle.iloc[start:end] #dataframe of test values from the fold
y_test = df_test.iloc[:,-1] #True values labeled
df_test = df_test.drop(labels='class', axis=1) # removes the label column from df_test
X_test = df_test.iloc[:,0:2].values
#print(X_test)
#Train Dataframe
drop_index = list(range(start,end))
df_train = df_shuffle.drop(drop_index) #, axis = 0)
start += fold_size
end += fold_size
X_train = df_train.iloc[:,0:2].values
y_train = df_train.iloc[:,2].values
#Train SVM
#svc = LinearSVC(C = C, max_iter = 100000)
svc = svm.SVC(kernel='linear', C = C)
svc.fit(X_train, y_train)
#Predict
y_pred = svc.predict(X_test)
#Accuracy
accuracy_i = accuracy(y_test, y_pred)
accuracy_list.append(accuracy_i)
return accuracy_list
def cross_validation(times,dataframe, C):
'''Performs 10 fold cross validation 'times' number of times'''
master_acc = []
for i in range(times):
print('Calculating {} of {} times - 10 fold cross validation...'.format(i, times))
accuracy_list = fold_cross_val(dataframe, 10, C)
master_acc.append(accuracy_list)
accuracy_flat = [y for x in master_acc for y in x]
return accuracy_flat
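# Usage sketch for the two helpers above (illustrative only; `df` is the combined
# class A / class B dataframe with a 'class' column, as built in main()):
#
#   accs = cross_validation(10, df, C_best)   # 10 repeats x 10 folds = 100 scores
#   stats_info(accs)                          # prints mean / std / variance (defined below)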
def stats_info(list_accuracies):
'''Determines the mean, variance, and standard deviation of the list of accuracies'''
mean = sum(list_accuracies) / len(list_accuracies)
variance = sum([((x - mean) ** 2) for x in list_accuracies]) / len(list_accuracies)
std = variance ** 0.5
print('Mean Cross-Validation Accuracy: \t\t\t{:.2f}'.format(mean))
print('Standard Deviation of Cross-Validation Accuracy: \t{:.2f}'.format(std))
print('Variance of Cross-Validation Accuracy: \t\t\t{:.2f}'.format(variance))
return mean, variance, std
def update_weights(y_weak_pred, y_train, beta_t, sample_weight):
'''Updates the sample weights using the correct predictions'''
correct = y_weak_pred == y_train #Boolean array: True where the prediction matches the training label
update = []
for x in correct:
if x == 1:
update.append(beta_t) #If correct, update list with beta
else:
update.append(1) #If incorrecet, update list with 1
sample_weight = (sample_weight)*np.array(update)
Z = sample_weight.sum() # Used to re-distribute the weights
sample_weight = sample_weight/Z
return sample_weight
def m1_algorithm(N_samples, X_train, y_train, C, T):
'''Adaboost m1 algorithm'''
N = len(y_train) #Len of the training set
#Initialize the sample weights
sample_weight = np.ones(N)/N
condition = "continue" # Initilze the loop
model_list, alpha_list = [], []
while (condition == "continue"):
row_i = np.random.choice(X_train.shape[0], N_samples, p=sample_weight)
X_train_sample = X_train[row_i]
y_train_sample = y_train[row_i]
#Weak learner linear SVM fit and predict
weak_learner = SVC(kernel='linear', C=C)
weak_learner.fit(X_train_sample, y_train_sample)
y_weak_pred = weak_learner.predict(X_train)
#Step 3: Hypothesis error
incorrect = y_weak_pred != y_train
error_t = np.dot(incorrect, sample_weight)#/sum(sample_weight)
#Step 4:
beta_t = error_t / (1 - error_t)
#Hypothesis weight
alpha_t = np.log(1/beta_t)
if error_t >= 0.5:
continue
else:
sample_weight = update_weights(y_weak_pred, y_train, beta_t, sample_weight)
model_list.append(weak_learner)
alpha_list.append(alpha_t)
if len(alpha_list) == T:
condition = 'break'
return alpha_list, model_list
def predict_ensemble(X_test, alpha_list, model_list):
'''Combines the ensemble of weak learner predictions into one prediction for the class labels'''
y_pred_list = []
for alpha, model in zip(alpha_list, model_list):
y_pred = model.predict(X_test)
y_pred_list.append(y_pred)
y_pred_list = np.asarray(y_pred_list)
y_pred_ensemble = []
for point in range(X_test.shape[0]):
points = y_pred_list[:,point] # Takes the column, which is the y values for each model at a single point.
index_0 = np.where(points == 0)[0] #Index values of 0 class
index_1 = np.where(points == 1)[0] #Index values of 1 class
alpha_array = np.array(alpha_list)
alpha_0 = alpha_array[index_0].sum() #summation of alpha values when class label is 0 for the weak learners
alpha_1 = alpha_array[index_1].sum() #summation of alpha values when class label is 1 for the weak learners
if alpha_0 >= alpha_1:
y_pred_ensemble.append(0)
else:
y_pred_ensemble.append(1)
return y_pred_ensemble
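# Putting the AdaBoost.M1 pieces above together (illustrative sketch; X_train,
# y_train and X_test are numpy arrays shaped like those built in
# fold_cross_val_model below):
#
#   alphas, models = m1_algorithm(100, X_train, y_train, C_best, 50)
#   y_pred = predict_ensemble(X_test, alphas, models)
#
# Each weak learner is a linear SVM trained on a weighted resample of the data;
# its vote carries weight alpha_t = log(1 / beta_t), and predict_ensemble picks,
# for every test point, the class whose accumulated alpha is larger.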
def fold_cross_val_model(dataframe, num_folds):
'''10 fold cross validation'''
#Shuffle Dataframe
df_shuffle = dataframe.iloc[np.random.permutation(len(dataframe))]
df_shuffle = df_shuffle.reset_index(drop=True) #Reset the index to begin at 0
folds = num_folds #Calls number of folds
fold_size = int(df_shuffle.shape[0]/folds) # Determines the size of the folds
accuracy_list = [] #makes empty list to store accuracy values
start = 0 # initialize the start
end = fold_size # initialize the end
for i in range(folds):
print(i)
print('\t Calculating fold number {} of {} folds...'.format(i+1, folds))
#If the final fold would leave a sliver of data, extend the test fold to absorb the extra rows.
len_dataframe = len(df_shuffle)
if (len_dataframe - end) < fold_size:
end = len_dataframe
#Test Dataframe
df_test = df_shuffle.iloc[start:end] #dataframe of test values from the fold
y_test = df_test.iloc[:,-1] #True values labeled
df_test = df_test.drop(labels='class', axis=1) # removes the label column from df_test
X_test = df_test.iloc[:,0:2].values
#Train Dataframe
drop_index = list(range(start,end))
df_train = df_shuffle.drop(drop_index) #, axis = 0)
X_train = df_train.iloc[:,0:2].values #Training set X
y_train = df_train.iloc[:,2].values # training set y class labels
#M1 algorithm
alpha_list, model_list = m1_algorithm(100, X_train, y_train, C_best, 50)
#Ensemble prediction of y
y_pred_ensemble = predict_ensemble(X_test, alpha_list, model_list)
y_test = list(y_test)
accuracy_i = accuracy(y_test, y_pred_ensemble)
accuracy_list.append(accuracy_i)
start += fold_size
end += fold_size
return accuracy_list
def cross_validation_model(times,dataframe):
'''Performs 10-fold cross validation 'times' number of times'''
master_acc = []
for i in range(times):
print('Calculating {} of {} times - 10 fold cross validation...'.format(i, times))
accuracy_list = fold_cross_val_model(dataframe, 10)
master_acc.append(accuracy_list)
accuracy_flat = [y for x in master_acc for y in x]
return accuracy_flat
def main():
Directory = "E:\Documents\Waterloo-Masters\SYDE 675\Assignment 3"
os.chdir(Directory)
dfA = get_classA_dataset()
dfB = get_classB_dataset()
#PART 1 .......................................................................
plt.scatter(dfA.iloc[:,0].values, dfA.iloc[:,1].values, label = 'Class A')
plt.scatter(dfB.iloc[:,0].values, dfB.iloc[:,1].values, label = 'Class B')
plt.xlabel('x1')
plt.ylabel('x2')
plt.legend()
plt.savefig('Q2.part1.png')
plt.show()
dfA['class'] = [0]*dfA.shape[0]
dfB['class'] = [1]*dfB.shape[0]
df = pd.concat([dfA, dfB],ignore_index=True)
#PART 2........................................................................
X_train = df.iloc[:,0:2].values
y_train = df.iloc[:,2].values
SVM_classifier(C_list, df)
| |
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, <NAME> and <NAME>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
from functools import partial
import numpy as np
from numpy.testing import assert_, run_module_suite, assert_allclose
# disable the MC progress bar
import os
from qutip import *
from qutip.random_objects import rand_ket
os.environ['QUTIP_GRAPHICS'] = "NO"
class TestJCModelEvolution:
"""
A test class for the QuTiP functions for the evolution of JC model
"""
def qubit_integrate(self, tlist, psi0, epsilon, delta, g1, g2):
H = epsilon / 2.0 * sigmaz() + delta / 2.0 * sigmax()
c_op_list = []
rate = g1
if rate > 0.0:
c_op_list.append(np.sqrt(rate) * sigmam())
rate = g2
if rate > 0.0:
c_op_list.append(np.sqrt(rate) * sigmaz())
output = mesolve(
H, psi0, tlist, c_op_list, [sigmax(), sigmay(), sigmaz()])
expt_list = output.expect[0], output.expect[1], output.expect[2]
return expt_list[0], expt_list[1], expt_list[2]
def jc_steadystate(self, N, wc, wa, g, kappa, gamma,
pump, psi0, use_rwa, tlist):
# Hamiltonian
a = tensor(destroy(N), identity(2))
sm = tensor(identity(N), destroy(2))
if use_rwa:
# use the rotating wave approximation
H = wc * a.dag(
) * a + wa * sm.dag() * sm + g * (a.dag() * sm + a * sm.dag())
else:
H = wc * a.dag() * a + wa * sm.dag() * sm + g * (
a.dag() + a) * (sm + sm.dag())
# collapse operators
c_op_list = []
n_th_a = 0.0 # zero temperature
rate = kappa * (1 + n_th_a)
c_op_list.append(np.sqrt(rate) * a)
rate = kappa * n_th_a
if rate > 0.0:
c_op_list.append(np.sqrt(rate) * a.dag())
rate = gamma
if rate > 0.0:
c_op_list.append(np.sqrt(rate) * sm)
rate = pump
if rate > 0.0:
c_op_list.append(np.sqrt(rate) * sm.dag())
# find the steady state
rho_ss = steadystate(H, c_op_list)
return expect(a.dag() * a, rho_ss), expect(sm.dag() * sm, rho_ss)
def jc_integrate(self, N, wc, wa, g, kappa, gamma,
pump, psi0, use_rwa, tlist):
# Hamiltonian
a = tensor(destroy(N), identity(2))
sm = tensor(identity(N), destroy(2))
if use_rwa:
# use the rotating wave approximation
H = wc * a.dag() * a + wa * sm.dag() * sm + g * (
a.dag() * sm + a * sm.dag())
else:
H = wc * a.dag() * a + wa * sm.dag() * sm + g * (
a.dag() + a) * (sm + sm.dag())
# collapse operators
c_op_list = []
n_th_a = 0.0 # zero temperature
rate = kappa * (1 + n_th_a)
c_op_list.append(np.sqrt(rate) * a)
rate = kappa * n_th_a
if rate > 0.0:
c_op_list.append(np.sqrt(rate) * a.dag())
rate = gamma
if rate > 0.0:
c_op_list.append(np.sqrt(rate) * sm)
rate = pump
if rate > 0.0:
c_op_list.append(np.sqrt(rate) * sm.dag())
# evolve and calculate expectation values
output = mesolve(
H, psi0, tlist, c_op_list, [a.dag() * a, sm.dag() * sm])
expt_list = output.expect[0], output.expect[1]
return expt_list[0], expt_list[1]
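# The RWA Hamiltonian assembled above is the standard Jaynes-Cummings form
# H = wc*a.dag()*a + wa*sm.dag()*sm + g*(a.dag()*sm + a*sm.dag()). With no
# dissipation, an initial |n, excited> state undergoes vacuum Rabi oscillations
# at frequency 2*g*sqrt(n+1); the analytic nc_ex / na_ex expressions in the test
# cases below are exactly these oscillations, damped by exp(-kappa*t) and
# exp(-gamma*t) once cavity and atom decay are switched on.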
def testQubitDynamics1(self):
"mesolve: qubit with dissipation"
epsilon = 0.0 * 2 * np.pi # cavity frequency
delta = 1.0 * 2 * np.pi # atom frequency
g2 = 0.1
g1 = 0.0
psi0 = basis(2, 0) # initial state
tlist = np.linspace(0, 5, 200)
sx, sy, sz = self.qubit_integrate(tlist, psi0, epsilon, delta, g1, g2)
sx_analytic = np.zeros(np.shape(tlist))
sy_analytic = -np.sin(2 * np.pi * tlist) * np.exp(-tlist * g2)
sz_analytic = np.cos(2 * np.pi * tlist) * np.exp(-tlist * g2)
assert_(max(abs(sx - sx_analytic)) < 0.05)
assert_(max(abs(sy - sy_analytic)) < 0.05)
assert_(max(abs(sz - sz_analytic)) < 0.05)
def testQubitDynamics2(self):
"mesolve: qubit without dissipation"
epsilon = 0.0 * 2 * np.pi # cavity frequency
delta = 1.0 * 2 * np.pi # atom frequency
g2 = 0.0
g1 = 0.0
psi0 = basis(2, 0) # initial state
tlist = np.linspace(0, 5, 200)
sx, sy, sz = self.qubit_integrate(tlist, psi0, epsilon, delta, g1, g2)
sx_analytic = np.zeros(np.shape(tlist))
sy_analytic = -np.sin(2 * np.pi * tlist) * np.exp(-tlist * g2)
sz_analytic = np.cos(2 * np.pi * tlist) * np.exp(-tlist * g2)
assert_(max(abs(sx - sx_analytic)) < 0.05)
assert_(max(abs(sy - sy_analytic)) < 0.05)
assert_(max(abs(sz - sz_analytic)) < 0.05)
def testCase1(self):
"mesolve: cavity-qubit interaction, no dissipation"
use_rwa = True
N = 4 # number of cavity fock states
wc = 2 * np.pi * 1.0 # cavity frequency
wa = 2 * np.pi * 1.0 # atom frequency
g = 2 * np.pi * 0.01 # coupling strength
kappa = 0.0 # cavity dissipation rate
gamma = 0.0 # atom dissipation rate
pump = 0.0 # atom pump rate
# start with an excited atom and maximum number of photons
n = N - 2
psi0 = tensor(basis(N, n), basis(2, 1))
tlist = np.linspace(0, 1000, 2000)
nc, na = self.jc_integrate(
N, wc, wa, g, kappa, gamma, pump, psi0, use_rwa, tlist)
nc_ex = (n + 0.5 * (1 - np.cos(2 * g * np.sqrt(n + 1) * tlist)))
na_ex = 0.5 * (1 + np.cos(2 * g * np.sqrt(n + 1) * tlist))
assert_(max(abs(nc - nc_ex)) < 0.005, True)
assert_(max(abs(na - na_ex)) < 0.005, True)
def testCase2(self):
"mesolve: cavity-qubit without interaction, decay"
use_rwa = True
N = 4 # number of cavity fock states
wc = 2 * np.pi * 1.0 # cavity frequency
wa = 2 * np.pi * 1.0 # atom frequency
g = 2 * np.pi * 0.0 # coupling strength
kappa = 0.005 # cavity dissipation rate
gamma = 0.01 # atom dissipation rate
pump = 0.0 # atom pump rate
# start with an excited atom and maximum number of photons
n = N - 2
psi0 = tensor(basis(N, n), basis(2, 1))
tlist = np.linspace(0, 1000, 2000)
nc, na = self.jc_integrate(
N, wc, wa, g, kappa, gamma, pump, psi0, use_rwa, tlist)
nc_ex = (n + 0.5 * (1 - np.cos(2 * g * np.sqrt(n + 1) * tlist))) * \
np.exp(-kappa * tlist)
na_ex = 0.5 * (1 + np.cos(2 * g * np.sqrt(n + 1) * tlist)) * \
np.exp(-gamma * tlist)
assert_(max(abs(nc - nc_ex)) < 0.005, True)
assert_(max(abs(na - na_ex)) < 0.005, True)
def testCase3(self):
"mesolve: cavity-qubit with interaction, decay"
use_rwa = True
N = 4 # number of cavity fock states
wc = 2 * np.pi * 1.0 # cavity frequency
wa = 2 * np.pi * 1.0 # atom frequency
g = 2 * np.pi * 0.1 # coupling strength
kappa = 0.05 # cavity dissipation rate
gamma = 0.001 # atom dissipation rate
pump = 0.25 # atom pump rate
# start with an excited atom and maximum number of photons
n = N - 2
psi0 = tensor(basis(N, n), basis(2, 1))
tlist = np.linspace(0, 200, 500)
nc, na = self.jc_integrate(
N, wc, wa, g, kappa, gamma, pump, psi0, use_rwa, tlist)
# we don't have any analytics | |
total, outp = self.parse_xml(_id, ship, table, raw)
if total:
await bot.say(outp)
else:
print("WARNING: ###############################################")
print("WARNING: Didn't find anything to lookup, skipping lookup.")
print("WARNING: ###############################################")
await bot.say("<@{}> {}{}/{}".format(_id, self.dir_fits[3:], group, data))
return
await bot.say("<@{}> I'm sorry Dave, I can't allow you to do that.".format(_id))
return
@bot.command(pass_context=True)
async def route(ctx):
"""Show the routes from one system to another.
------------------------------
DESCRIPTION: Route planning from source to destination; shows each hop.
Shortest path is default, but you can specify secure/high or insecure/low/null.
------------------------------
FORMAT: #route <source> <destination> [routing]
------------------------------
EXAMPLE: #route jita vlil
12 jumps using shortest routing.
Jita > Ikuchi > Tunttaras > Nourvukaiken > Tama > Kedama > Hirri > Pynekastoh > Hikkoken > Nennamaila > Aldranette > Vlillirier"""
_id = ctx.message.author.id
parts = ctx.message.content.split()
if len(parts) == 4:
sort = parts[3].lower()
if sort in ['shortest','secure','insecure']:
sort = parts[3].lower()
elif sort.startswith('sh'):
sort = 'shortest'
elif sort.startswith('sec'):
sort = 'secure'
elif sort.startswith('hi'):
sort = 'secure'
elif sort.startswith('in'):
sort = 'insecure'
elif sort.startswith('lo'):
sort = 'insecure'
elif sort.startswith('nu'):
sort = 'insecure'
elif sort.startswith('ze'):
sort = 'insecure'
else:
sort = 'shortest'
else:
sort = 'shortest'
if len(parts) < 3:
await bot.say('<@{}> Give me a source and destination system, ex. #route jita akora'.format(_id))
return
src = []
for system_id, d in self.systems.items():
if parts[1].lower() == d['name'].lower():
src.append( [d['name'], d['system_id']] )
break
if len(src) < 1:
for system_id, d in self.systems.items():
if d['name'].lower().startswith(parts[1].lower()):
src.append( [d['name'], d['system_id']] )
break
if len(src) < 1:
await bot.say("<@{}> Starting system '{}' not found.".format(_id, parts[1]))
return
dst = []
for system_id, d in self.systems.items():
if parts[2].lower() == d['name'].lower():
dst.append( [d['name'], d['system_id']] )
break
if len(dst) < 1:
for system_id, d in self.systems.items():
if d['name'].lower().startswith(parts[2].lower()):
dst.append( [d['name'], d['system_id']] )
break
if len(dst) < 1:
await bot.say("<@{}> Starting system found, but destination '{}' was not found.".format(_id, parts[2]))
return
url = 'https://esi.evetech.net/latest/route/{}/{}/?flag={}'.format(src[0][1], dst[0][1], sort)
print(url)
try:
flag_yes = False
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
flag_yes = True
except:
await asyncio.sleep(0.5)
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
response = eval(response)
flag_yes = True
if flag_yes:
data = '<@{}> {} jumps using {} routing.```css\n'.format(_id, len(response), sort)
route = ''
for _sys in response:
for system_id, d in self.systems.items():
if _sys == d['system_id']:
sec = str(round(d['security_status'],1))
if sec[0:2] == '0.':
sec = sec[1:]
route += '{}({}) > '.format(d['name'], sec)
break
route = route[:-3]
data += route
data += '```'
await bot.say(data)
@bot.command(pass_context=True)
async def map(ctx):
"""Fetch a dotlan map for any region.
------------------------------
DESCRIPTION: Retrieve dotlan map link highlighting recent jumps.
------------------------------
FORMAT: #map <region>
------------------------------
EXAMPLE: #map the for
http://evemaps.dotlan.net/map/the_forge#jumps"""
_id = ctx.message.author.id
#http://evemaps.dotlan.net/map/Tribute/M-OEE8#jumps
url = 'http://evemaps.dotlan.net/map/'
try:
name = ctx.message.content
if len(name) > 2:
name = '_'.join(name)
elif len(name) == 2:
name = name[1]
else:
await bot.say("<@{}> **Which region?** (partial match ok)```{}```".format(_id, ', '.join(self.regionslist)))
return
#print('Processing map request for {}'.format(name))
found = False
for region in self.regionslist:
if name == region.lower():
found = True
print('Exact match found! {}'.format(name))
break
if not found:
print("No exact match found, checking nicknames.")
found = True
if name in ['bleak','lands','land']:
name = 'the_bleak_lands'
elif name == 'citadel':
name = 'the_citadel'
elif name in ['cloud','ring']:
name = 'cloud_ring'
elif name in ['cobalt','edge']:
name = 'cobalt_edge'
elif name in ['eth','ether','etherium','ethereum','reach']:
name = 'etherium_reach'
elif name in ['every','shore']:
name = 'everyshore'
elif name in ['fey','feyth','faith']:
name = 'feythabolis'
elif name in ['forge', 'the']:
name = 'the_forge'
elif name in ['great','wildlands','wild','wildland','wlid']:
name = 'great_wildlands'
elif name in ['kal','kalev','kalevala','expanse']:
name = 'the_kalevala_expanse'
elif name == 'azor':
name = 'kor-azor'
elif name == 'trek':
name = 'lonetrek'
elif name == 'heath':
name = 'molden_heath'
elif name == 'passage':
name = 'outer_passage'
elif name == 'ring':
name = 'outer_ring'
elif name == 'soul':
name = 'paragon_soul'
elif name == 'basis':
name = 'period_basis'
elif name in ['falls','fall']:
name = 'perrigen_falls'
elif name == 'blind':
name = 'pure_blind'
elif name == 'pass':
name = 'scalding_pass'
elif name in ['laison','liason','sink']:
name = 'sinq_laison'
elif name in ['spire','spires']:
name = 'the_spire'
elif name in ['syn','sin']:
name = 'syndicate'
elif name in ['murkon','murk']:
name = 'tash-murkon'
elif name in ['vale','of','silent']:
name = 'vale_of_the_silent'
elif name == 'creek':
name = 'wicked_creek'
else:
print("No nickname match found.")
found = False
if not found:
for region in self.regionslist:
print("checking {} = {}".format(name,region.lower()))
if region.lower().startswith(name):
name = region
found = True
break
if found:
url = '<{}{}#jumps>'.format(url, name)
print('Sending link: {}'.format(url))
await bot.say("<@{} {}".format(_id, url))
else:
await bot.say("<@{}> No match found. **Which region?** (partial match ok)```{}```".format(_id, ', '.join(self.regionslist)))
except Exception as e:
print("Map failure: {}".format(e))
try:
await bot.say("<@{}> Hmm, something went wrong.".format(_id))
except Exception as e:
self.do_restart()
@bot.command(pass_context=True)
async def get_auth(ctx):
"""get the auth url needed for accessing assets"""
_id = ctx.message.author.id
url = 'https://login.eveonline.com/oauth/authorize?response_type=token&redirect_uri=https://localhost/callback&client_id=baaf8fc216864da297227ba80c57f445&scope=publicData+esi-assets.read_assets.v1'
await bot.say('<@{}> Sign in URL: {}'.format(_id, url))
the_id = self.people.get(_id, None)
if the_id is None:
the_token = None
the_token = self.people[_id].get('token', 'None')
the_char = self.people[_id].get('char', 'None')
the_char = self.people[_id].get('char_id', 'None')
the_expires = self.people[_id].get('expires', 'None')
if the_id is None or the_token == 'None':
await bot.say('<@{}> No token set. Please sign in with the above url, then use #set_auth and tell me the URL you are redirected to after signing in, and I will extract the authorization token, or you can extract the token from the url and tell me just the token part.'.format(_id))
return
if the_expires != 'None':
the_expires = str(self.people[_id]['expires'])[:-10]
time_left = ( self.people[_id]['expires'] - datetime.utcnow() ).seconds
if time_left > 1234 or time_left < 1:
time_left = "Expired"
else:
time_left = '{:.1f} min'.format(time_left / 60.0)
data = '<@{}> Auth Info:```css\n'.format(_id)
data += 'Character: {}\n'.format(the_char)
data += 'Character ID: {}\n'.format(self.people[_id]['char_id'])
data += 'Token: {}\n'.format(the_token)
data += 'Token expires: {} {}```'.format(time_left, the_expires)
await bot.say(data)
@bot.command(pass_context=True)
async def set_auth(ctx):
"""set the authorization token for access to assets"""
_id = ctx.message.author.id
parts = ctx.message.content.split()
try:
if len(parts) > 1 and parts[1].startswith('https://localhost/callback#access_token='):
token = parts[1].split('#access_token=')[-1]
token = token.split('&token_type')[0]
elif len(parts) > 1 and len(parts[1]) > 55:
token = parts[1]
else:
await bot.say('<@{}> Use #get_auth to get the authorization url, sign in, then tell me the URL you are redirected to after signing in, and I will extract the authorization token, or you can extract the token from the url and tell me just the token part.'.format(_id))
return
if self.people.get(_id, None) is None:
self.people[_id] = {}
self.people[_id]['id'] = _id
the_char = self.people[_id].get('char', 'None')
the_char_id = self.people[_id].get('char_id', 'None')
self.people[_id]['token'] = token
self.people[_id]['expires'] = datetime.utcnow() + timedelta(minutes=99)
data = '<@{}> Token received.```css\n'.format(_id)
data += 'Character: {}\n'.format(the_char)
data += 'Character ID: {}\n'.format(the_char_id)
data += 'Token: {}\n'.format(self.people[_id]['token'])
data += 'Token expires: 20 min ({})```'.format(str(self.people[_id]['expires'])[:-10])
# save
with open('people.pickle', 'wb') as f:
pickle.dump(self.people, f, protocol=pickle.HIGHEST_PROTOCOL)
await bot.say(data)
except Exception as e:
print("X"*42)
print(e)
print("X"*42)
await bot.say("<@{}> That doesn't look like the returned URL or token to me.".format(_id))
await asyncio.sleep(0.25)
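# The redirect URL parsed by set_auth above is assumed to look roughly like
#   https://localhost/callback#access_token=<TOKEN>&token_type=Bearer&...
# The command accepts either that full URL or a bare token (any argument longer
# than 55 characters), strips everything outside the access_token fragment, and
# stores the token together with a local expiry timestamp 99 minutes ahead.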
@bot.command(pass_context=True)
async def set_char(ctx):
"""Set your character name to pair with access to assets"""
_id = ctx.message.author.id
parts = ctx.message.content.split()
if self.people.get(_id, None) is None:
self.people[_id] = {}
self.people[_id]['id'] = _id
self.people[_id]['char'] = ' '.join(parts[1:])
await bot.say("<@{}> Searching for '{}', please wait...".format(_id, self.people[_id]['char']))
await asyncio.sleep(0.25)
flag_fail = False
url = 'https://esi.evetech.net/latest/search/?categories=character&strict=true&search={}'.format(self.people[_id]['char'].replace(' ','%20'))
print(url)
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
print("RESPONSE=[{}]END_RESPONSE".format(response))
d = eval(response)
try:
if d.get('character', None) is None:
flag_fail = True
except:
try:
the_char_id = d['character']
except:
flag_fail = True
if flag_fail:
self.people[_id]['char'] = 'None'
the_char_id = 'None'
self.people[_id]['char_id'] = the_char_id
the_token = self.people[_id].get('token', 'None')
the_expires = self.people[_id].get('expires', 'None')
if the_token == 'None' or the_expires == 'None':
time_left = "Expired"
if the_expires != 'None':
time_left = ( self.people[_id]['expires'] - datetime.utcnow() ).seconds
if time_left > 1234 or time_left < 1:
time_left = "Expired"
else:
time_left = '{:.1f} min'.format(time_left / 60.0)
if flag_fail:
data = "<@{}> Invalid character name! Did you spell it correctly?```css\n".format(_id)
else:
data = "<@{}> Character name set to: '{}'```css\n".format(_id, self.people[_id]['char'])
# save
with open('people.pickle', 'wb') as f:
pickle.dump(self.people, f, protocol=pickle.HIGHEST_PROTOCOL)
data += 'Character: {}\n'.format(self.people[_id]['char'])
data += 'Character ID: {}\n'.format(self.people[_id]['char_id'])
data += 'Token: {}\n'.format(the_token)
data += 'Token expires: {} | |
self.atoms = None  # copy of atoms object from last calculation
self.results = {} # calculated properties (energy, forces, ...)
self.parameters = None # calculational parameters
if restart is not None:
try:
self.read(restart) # read parameters, atoms and results
except ReadError:
if ignore_bad_restart_file:
self.reset()
else:
raise
self.label = None
self.directory = None
self.prefix = None
self.set_label(label)
if self.parameters is None:
# Use default parameters if they were not read from file:
self.parameters = self.get_default_parameters()
if atoms is not None:
atoms.calc = self
if self.atoms is not None:
# Atoms were read from file. Update atoms:
if not (equal(atoms.numbers, self.atoms.numbers) and
(atoms.pbc == self.atoms.pbc).all()):
raise RuntimeError('Atoms not compatible with file')
atoms.positions = self.atoms.positions
atoms.cell = self.atoms.cell
self.set(**kwargs)
if not hasattr(self, 'name'):
self.name = self.__class__.__name__.lower()
def set_label(self, label):
"""Set label and convert label to directory and prefix.
Examples:
* label='abc': (directory='.', prefix='abc')
* label='dir1/abc': (directory='dir1', prefix='abc')
Calculators that must write results to files with fixed names
can overwrite this method so that the directory is set to all
of label."""
self.label = label
if label is None:
self.directory = None
self.prefix = None
else:
self.directory, self.prefix = os.path.split(label)
if self.directory == '':
self.directory = os.curdir
def get_default_parameters(self):
return Parameters(copy.deepcopy(self.default_parameters))
def todict(self):
default = self.get_default_parameters()
return dict((key, value)
for key, value in self.parameters.items()
if key not in default or value != default[key])
def reset(self):
"""Clear all information from old calculation."""
self.atoms = None
self.results = {}
def read(self, label):
"""Read atoms, parameters and calculated properties from output file.
Read result from self.label file. Raise ReadError if the file
is not there. If the file is corrupted or contains an error
message from the calculation, a ReadError should also be
raised. In case of success, these attributes must be set:
atoms: Atoms object
The state of the atoms from last calculation.
parameters: Parameters object
The parameter dictionary.
results: dict
Calculated properties like energy and forces.
The FileIOCalculator.read() method will typically read atoms
and parameters and get the results dict by calling the
read_results() method."""
self.set_label(label)
def get_atoms(self):
if self.atoms is None:
raise ValueError('Calculator has no atoms')
atoms = self.atoms.copy()
atoms.calc = self
return atoms
@classmethod
def read_atoms(cls, restart, **kwargs):
return cls(restart=restart, label=restart, **kwargs).get_atoms()
def set(self, **kwargs):
"""Set parameters like set(key1=value1, key2=value2, ...).
A dictionary containing the parameters that have been changed
is returned.
Subclasses must implement a set() method that will look at the
changed parameters and decide if a call to reset() is needed.
If the changed parameters are harmless, like a change in
verbosity, then there is no need to call reset().
The special keyword 'parameters' can be used to read
parameters from a file."""
if 'parameters' in kwargs:
filename = kwargs.pop('parameters')
parameters = Parameters.read(filename)
parameters.update(kwargs)
kwargs = parameters
changed_parameters = {}
for key, value in kwargs.items():
oldvalue = self.parameters.get(key)
if key not in self.parameters or not equal(value, oldvalue):
if isinstance(oldvalue, dict):
# Special treatment for dictionary parameters:
for name in value:
if name not in oldvalue:
raise KeyError(
'Unknown subparameter "%s" in '
'dictionary parameter "%s"' % (name, key))
oldvalue.update(value)
value = oldvalue
changed_parameters[key] = value
self.parameters[key] = value
return changed_parameters
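# Hedged usage sketch of set(): keyword arguments update self.parameters and only the
# changed subset is returned; the parameter names below are illustrative, not defaults
# of any particular calculator.
# changed = calc.set(xc='PBE', kpts=(2, 2, 2))
# if changed:
#     calc.reset()   # subclasses decide whether a changed parameter requires a reset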
def check_state(self, atoms, tol=1e-15):
"""Check for system changes since last calculation."""
if self.atoms is None:
system_changes = all_changes
else:
system_changes = []
if not equal(self.atoms.positions, atoms.positions, tol):
system_changes.append('positions')
if not equal(self.atoms.numbers, atoms.numbers):
system_changes.append('numbers')
if not equal(self.atoms.cell, atoms.cell, tol):
system_changes.append('cell')
if not equal(self.atoms.pbc, atoms.pbc):
system_changes.append('pbc')
if not equal(self.atoms.get_initial_magnetic_moments(),
atoms.get_initial_magnetic_moments(), tol):
system_changes.append('initial_magmoms')
if not equal(self.atoms.get_initial_charges(),
atoms.get_initial_charges(), tol):
system_changes.append('initial_charges')
return system_changes
def get_potential_energy(self, atoms=None, force_consistent=False):
energy = self.get_property('energy', atoms)
if force_consistent:
return self.results.get('free_energy', energy)
else:
return energy
def get_forces(self, atoms=None):
return self.get_property('forces', atoms)
def get_stress(self, atoms=None):
return self.get_property('stress', atoms)
def get_dipole_moment(self, atoms=None):
return self.get_property('dipole', atoms)
def get_charges(self, atoms=None):
return self.get_property('charges', atoms)
def get_magnetic_moment(self, atoms=None):
return self.get_property('magmom', atoms)
def get_magnetic_moments(self, atoms=None):
return self.get_property('magmoms', atoms)
def get_property(self, name, atoms=None, allow_calculation=True):
if name not in self.implemented_properties:
raise NotImplementedError
if atoms is None:
atoms = self.atoms
system_changes = []
else:
system_changes = self.check_state(atoms)
if system_changes:
self.reset()
if name not in self.results:
if not allow_calculation:
return None
try:
self.calculate(atoms, [name], system_changes)
except Exception:
self.reset()
raise
if name == 'magmom' and 'magmom' not in self.results:
return 0.0
if name == 'magmoms' and 'magmoms' not in self.results:
return np.zeros(len(atoms))
result = self.results[name]
if isinstance(result, np.ndarray):
result = result.copy()
return result
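# Minimal sketch of the caching behaviour implemented above (names are illustrative):
# e = calc.get_property('energy', atoms)          # triggers calculate() if results are stale
# e_again = calc.get_property('energy', atoms)    # served from self.results, no recalculation
# maybe = calc.get_property('forces', atoms, allow_calculation=False)  # None if not cached yet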
def calculation_required(self, atoms, properties):
system_changes = self.check_state(atoms)
if system_changes:
return True
for name in properties:
if name not in self.results:
return True
return False
def calculate(self, atoms=None, properties=['energy'],
system_changes=all_changes):
"""Do the calculation.
properties: list of str
List of what needs to be calculated. Can be any combination
of 'energy', 'forces', 'stress', 'dipole', 'charges', 'magmom'
and 'magmoms'.
system_changes: list of str
List of what has changed since last calculation. Can be
any combination of these five: 'positions', 'numbers', 'cell',
'pbc', 'initial_charges' and 'initial_magmoms'.
Subclasses need to implement this, but can ignore properties
and system_changes if they want. Calculated properties should
be inserted into results dictionary like shown in this dummy
example::
self.results = {'energy': 0.0,
'forces': np.zeros((len(atoms), 3)),
'stress': np.zeros(6),
'dipole': np.zeros(3),
'charges': np.zeros(len(atoms)),
'magmom': 0.0,
'magmoms': np.zeros(len(atoms))}
The subclass implementation should first call this
implementation to set the atoms attribute.
"""
if atoms is not None:
self.atoms = atoms.copy()
def calculate_numerical_forces(self, atoms, d=0.001):
"""Calculate numerical forces using finite difference.
All atoms will be displaced by +d and -d in all directions."""
from ase.calculators.test import numeric_force
return np.array([[numeric_force(atoms, a, i, d)
for i in range(3)] for a in range(len(atoms))])
def calculate_numerical_stress(self, atoms, d=1e-6, voigt=True):
"""Calculate numerical stress using finite difference."""
stress = np.zeros((3, 3), dtype=float)
cell = atoms.cell.copy()
V = atoms.get_volume()
for i in range(3):
x = np.eye(3)
x[i, i] += d
atoms.set_cell(np.dot(cell, x), scale_atoms=True)
eplus = atoms.get_potential_energy()
x[i, i] -= 2 * d
atoms.set_cell(np.dot(cell, x), scale_atoms=True)
eminus = atoms.get_potential_energy()
stress[i, i] = (eplus - eminus) / (2 * d * V)
x[i, i] += d
j = (i + 1) % 3
x[i, j] = d
x[j, i] = d
atoms.set_cell(np.dot(cell, x), scale_atoms=True)
eplus = atoms.get_potential_energy()
x[i, j] = -d
x[j, i] = -d
atoms.set_cell(np.dot(cell, x), scale_atoms=True)
eminus = atoms.get_potential_energy()
stress[i, j] = (eplus - eminus) / (4 * d * V)
stress[j, i] = stress[i, j]
atoms.set_cell(cell, scale_atoms=True)
if voigt:
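# Flat indices 0, 4, 8, 5, 2, 1 of the 3x3 tensor give the Voigt order xx, yy, zz, yz, xz, xy.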
return stress.flat[[0, 4, 8, 5, 2, 1]]
else:
return stress
def get_spin_polarized(self):
return False
class Dynamics:
"""Base-class for all MD and structure optimization classes."""
def __init__(self, atoms, logfile, trajectory, master=None):
"""Dynamics object.
Parameters:
atoms: Atoms object
The Atoms object to operate on.
logfile: file object or str
If *logfile* is a string, a file with that name will be opened.
Use '-' for stdout.
trajectory: Trajectory object or str
Attach trajectory object. If *trajectory* is a string a
Trajectory will be constructed. Use *None* for no
trajectory.
master: boolean
Defaults to None, which causes only rank 0 to save files. If
set to true, this rank will save files.
"""
self.atoms = atoms
if master is None:
master = DummyMPI().rank == 0
if not master:
logfile = None
elif isinstance(logfile, str):
if logfile == '-':
logfile = sys.stdout
else:
logfile = open(logfile, 'a')
self.logfile = logfile
self.observers = []
self.nsteps = 0
if trajectory is not None:
if isinstance(trajectory, str):
trajectory = Trajectory(trajectory, mode='w',
atoms=atoms, master=master)
self.attach(trajectory)
def get_number_of_steps(self):
return self.nsteps
def insert_observer(self, function, position=0, interval=1,
*args, **kwargs):
"""Insert an observer."""
if not isinstance(function, collections.Callable):
function = function.write
self.observers.insert(position, (function, interval, args, kwargs))
def attach(self, function, interval=1, *args, **kwargs):
"""Attach callback function.
If *interval > 0*, at every *interval* steps, call *function* with
arguments *args* and keyword arguments *kwargs*.
If *interval <= 0*, after step *interval*, call *function* with
arguments *args* and keyword arguments *kwargs*. This is
currently zero indexed."""
if not hasattr(function, '__call__'):
function = function.write
self.observers.append((function, interval, args, kwargs))
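# Hedged usage sketch: attach a callable (or an object with a .write method, such as a
# Trajectory) to be invoked periodically; the printer function and filename are illustrative.
# dyn.attach(printer, interval=5)
# dyn.attach(Trajectory('out.traj', 'w', atoms), interval=1)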
def call_observers(self):
for function, interval, args, kwargs in self.observers:
call = False
# Call every interval iterations
if interval > 0:
if (self.nsteps % | |
From dictionaries, plots average activity across gabor orientations or
brick directions per ROI for a single session and optionally a single
quintile. (Single figure type)
Required args:
- fig_type (str) : type of figure to plot, i.e., "byplot", "byreg",
"byfir" or "by{}{}" (ori/dir, deg)
- analyspar (dict): dictionary with keys of AnalysPar namedtuple
- stimpar (dict) : dictionary with keys of StimPar namedtuple
- quintpar (dict) : dictionary with keys of QuintPar namedtuple
- sess_info (dict): dictionary containing information from each
session (only first session used)
["mouse_ns"] (list) : mouse numbers
["sess_ns"] (list) : session numbers
["lines"] (list) : mouse lines
["planes"] (list) : imaging planes
["nrois"] (list) : number of ROIs in session
["nanrois_{}"] (list) : list of ROIs with NaNs/Infs in raw or dF/F
traces ("raw", "dff")
- tr_data (dict) : dictionary containing information to plot colormap.
Surprise x ori/dir keys are formatted as
[{s}_{od}] for surp in ["reg", "surp"]
and od in [0, 45, 90, 135] or
["right", "left"]
["n_seqs"] (dict) : dictionary containing number of seqs for
each surprise x ori/dir combination under a
separate key
["scale_vals"] (dict): dictionary containing 1D array or list of
scaling values for each surprise x ori/dir
combination under a separate key.
(NaN arrays for combinations with 0 seqs.)
["{}_min"] (num): minimum value from corresponding tr_stats
mean/medians
["{}_max"] (num): maximum value from corresponding tr_stats
mean/medians
["roi_sort"] (dict) : dictionary containing 1D arrays or list
of peak sorting order for each
surprise x ori/dir combination under a
separate key.
(NaN arrays for combinations with 0 seqs.)
["roi_me"] (dict) : dictionary containing trace mean/medians for
each ROI as 2D arrays or nested lists,
structured as:
ROIs x frames,
with each surprise x ori/dir combination
under a separate key
(NaN arrays for combinations with 0 seqs.)
["xran"] (list) : time values for the 2p frames
Optional args:
- figpar (dict) : dictionary containing the following figure parameter
dictionaries
default: None
["init"] (dict): dictionary with figure initialization parameters
["save"] (dict): dictionary with figure saving parameters
["dirs"] (dict): dictionary with additional figure parameters
["mng"] (dict): dictionary with parameters to manage matplotlib
- savedir (str) : path of directory in which to save plots.
default: None
- log_dir (bool) : if True, the figure saving directory is logged.
default: True
Returns:
- fulldir (str) : final name of the directory in which the figure is
saved (may differ from input savedir, if datetime
subfolder is added.)
"""
statstr_pr = sess_str_util.stat_par_str(
analyspar["stats"], analyspar["error"], "print")
stimstr_pr = sess_str_util.stim_par_str(
stimpar["stimtype"], stimpar["bri_dir"], stimpar["bri_size"],
stimpar["gabk"], "print")
stimstr = sess_str_util.stim_par_str(
stimpar["stimtype"], stimpar["bri_dir"], stimpar["bri_size"],
stimpar["gabk"])
if savedir is None:
savedir = os.path.join(
figpar["dirs"]["roi"],
figpar["dirs"]["oridir"])
cmap = plot_util.manage_mpl(cmap=True, nbins=100, **figpar["mng"])
# extract some info from sess_info (only one session)
keys = ["mouse_ns", "sess_ns", "lines", "planes"]
[mouse_n, sess_n, line, plane] = [sess_info[key][0] for key in keys]
dendstr = sess_str_util.dend_par_str(analyspar["dend"], plane, "roi")
dendstr_pr = sess_str_util.dend_par_str(
analyspar["dend"], plane, "roi", "print")
surps = ["reg", "surp"]
if stimpar["stimtype"] == "gabors":
surp_labs = surps
var_name = "orientation"
deg = "deg"
deg_pr = u"\u00B0"
oridirs = stimpar["gab_ori"]
n = 6
elif stimpar["stimtype"] == "bricks":
surp_labs = [f"{surps[i]} -> {surps[1-i]}" for i in range(len(surps))]
var_name = "direction"
deg = ""
deg_pr = ""
oridirs = stimpar["bri_dir"]
n = 7
qu_str, qu_str_pr = quintpar["qu_lab"][0], quintpar["qu_lab_pr"][0]
if qu_str != "":
qu_str = f"_{qu_str}"
if qu_str_pr != "":
qu_str_pr = f" - {qu_str_pr.capitalize()}"
if figpar is None:
figpar = sess_plot_util.init_figpar()
figpar = copy.deepcopy(figpar)
figpar["init"]["ncols"] = len(oridirs)
figpar["init"]["sharex"] = True
# plot colormaps
# gentitle = (f"Mouse {mouse_n} - {stimstr_pr} " +
# u"{} ".format(statstr_pr) + "across seqs colormaps"
# f"{qu_str_pr} \n(sess {sess_n}, {line} {plane})")
line_str, plane_str = "5", "dendrites"
if "23" in line:
line_str = "2/3"
if "soma" in plane:
plane_str = "somata"
gentitle = f"Mouse {mouse_n} - layer {line_str} {plane_str}{dendstr_pr}"
gen_savename = (f"roi_cm_m{mouse_n}_sess{sess_n}{qu_str}_{stimstr}_"
f"{plane}{dendstr}")
gen_savename = f"colormap_m{mouse_n}s{sess_n}_{stimstr}_{plane}"
if fig_type != "byfir":
return ""
if fig_type == "byplot":
scale_type = "per plot"
peak_sort = ""
figpar["init"]["sharey"] = False
elif fig_type == "byreg":
scale_type = f"within {var_name}"
peak_sort = f" of {surps[0]}"
figpar["init"]["sharey"] = False
elif fig_type == f"by{oridirs[0]}{deg}":
scale_type = "within surp/reg"
peak_sort = f" of first {var_name}"
figpar["init"]["sharey"] = True
elif fig_type == "byfir":
scale_type = "across plots"
peak_sort = " of first plot"
figpar["init"]["sharey"] = True
else:
gen_util.accepted_values_error("fig_type", fig_type,
["byplot", "byreg", f"by{oridirs[0]}{deg}", "byfir"])
subtitle = (f"ROIs sorted by peak activity{peak_sort} and scaled "
f"{scale_type}")
logger.info(f"- {subtitle}", extra={"spacing": TAB})
# suptitle = f"{gentitle}\n({subtitle})"
suptitle = gentitle
# get scaled and sorted ROI mean/medians (ROI x frame)
scaled_sort_me = roi_plots.scale_sort_trace_data(
tr_data, fig_type, surps, oridirs)
fig, ax = plot_util.init_fig(len(oridirs) * len(surps), **figpar["init"])
xran_edges = [np.min(tr_data["xran"]), np.max(tr_data["xran"])]
nrois = scaled_sort_me[f"{surps[0]}_{oridirs[0]}"].shape[1]
yticks_ev = int(10 * np.max([1, np.ceil(nrois/100)])) # avoid > 10 ticks
for o, od in enumerate(oridirs):
for s, (surp, surp_lab) in enumerate(zip(surps, surp_labs)):
sub_ax = ax[s][o]
key = f"{surp}_{od}"
title = u"{} seqs ({}{}) (n={})".format(
surp_lab.capitalize(), od, deg_pr, tr_data["n_seqs"][key])
if s == 0:
od_pr = od
if stimpar["stimtype"] == "bricks":
od_pr = od_pr.capitalize()
title = u"{}{}".format(od_pr, deg_pr)
else:
title = None
x_ax = None
y_ax = "ROIs"
if s != 1 or o != 0:
y_ax = ""
if stimpar["stimtype"] == "gabors":
x_ax = ""
sess_plot_util.add_axislabels(
sub_ax, fluor=analyspar["fluor"], x_ax=x_ax, y_ax=y_ax,
datatype="roi")
im = plot_util.plot_colormap(
sub_ax, scaled_sort_me[key], title=title, cmap=cmap, n_xticks=n,
yticks_ev=yticks_ev, xran=xran_edges, xticks="auto")
if stimpar["stimtype"] == "bricks":
plot_util.add_bars(sub_ax, 0)
else:
sub_ax.set_xticks([])
for s, surp in enumerate(surps):
sub_ax = ax[s:s+1]
if stimpar["stimtype"] == "gabors":
sess_plot_util.plot_labels(
sub_ax, stimpar["gabfr"], surp, pre=stimpar["pre"],
post=stimpar["post"], sharey=figpar["init"]["sharey"],
t_heis=-0.05)
plot_util.add_colorbar(fig, im, len(oridirs), cm_prop=0.06)
fig.suptitle(suptitle, fontsize="xx-large", y=1.08)
savename = f"{gen_savename}_{fig_type}"
fulldir = plot_util.savefig(
fig, savename, savedir, log_dir=log_dir, **figpar["save"])
plt.close(fig)
return fulldir
#############################################
def plot_oridir_colormaps(analyspar, sesspar, stimpar, extrapar, quintpar,
tr_data, sess_info, figpar=None, savedir=None,
parallel=False):
"""
plot_oridir_colormaps(analyspar, sesspar, stimpar, extrapar, quintpar,
tr_data, sess_info)
From dictionaries, plots average activity across gabor orientations or
brick directions per ROI as colormaps for a single session and optionally
a single quintile.
Required args:
- analyspar (dict): dictionary with keys of AnalysPar namedtuple
- sesspar (dict) : dictionary with keys of SessPar namedtuple
- stimpar (dict) : dictionary with keys of StimPar namedtuple
- extrapar (dict) : dictionary containing additional analysis
parameters
["analysis"] (str): analysis type (e.g., "o")
["datatype"] (str): datatype (e.g., "roi")
- quintpar (dict) : dictionary with keys of QuintPar namedtuple
- sess_info (dict): dictionary containing information from each
session (only first session used)
["mouse_ns"] (list) : mouse numbers
["sess_ns"] (list) : session numbers
["lines"] (list) : mouse lines
["planes"] (list) : imaging planes
["nrois"] (list) : number of ROIs in session
["nanrois_{}"] (list) : list of ROIs with NaNs/Infs in raw or dF/F
traces ("raw", "dff")
- tr_data (dict) : dictionary containing information to plot colormap.
Surprise x ori/dir keys are formatted as
[{s}_{od}] for surp in ["reg", "surp"]
and od in [0, 45, 90, 135] or
["right", "left"]
["n_seqs"] (dict) : dictionary containing number of seqs for
each surprise x ori/dir combination under a
separate key
["scale_vals"] (dict): dictionary containing 1D array or list of
scaling values for each surprise x ori/dir
combination under a separate key.
(NaN arrays for combinations with 0 seqs.)
["{}_min"] (num): minimum value from corresponding tr_stats
mean/medians
["{}_max"] (num): maximum value from corresponding tr_stats
mean/medians
["roi_sort"] (dict) : dictionary containing 1D arrays or list
of peak sorting order for each
surprise x ori/dir combination under a
separate key.
(NaN arrays for combinations with 0 seqs.)
["roi_me"] (dict) : dictionary containing trace mean/medians for
each ROI as 2D arrays or nested lists,
structured as:
ROIs x frames,
with each surprise x | |
in enumerate(case) \
if c == '1' and control_qubits[i] not in control_qubits_by_case]
# control_qubits_by_case += [control_qubits[i] for i,c in enumerate(case) if c == '1']
# sort selected control qubits according to readout (feedline) order
# qb_ro_order = np.sum([ list(self._acq_ch_map[key].keys()) for key in self._acq_ch_map.keys()], dtype=object)
# dqb_ro_order = np.array(qb_ro_order, dtype=str)[[qb[0] == 'D' for qb in qb_ro_order]]
control_qubits_by_case = [x for x, _ in sorted(zip(control_qubits_by_case, control_qubits))]
Q_idxs_control += [self.find_instrument(Q).cfg_qubit_nr() for Q in control_qubits_by_case]
cases = control_cases_to_measure
# for separate preparation of parking qubits in 1, used to study parking
if parking_qubits:
Q_idxs_parking = []
for i, qb in enumerate(parking_qubits):
assert qb in self.qubits()
if qb in target_qubits + control_qubits:
log.warning(f"Parking qubit {qb} already given as control or target qubit!")
Q_idxs_parking += [self.find_instrument(qb).cfg_qubit_nr()]
# prepare list of all used qubits
all_qubits = target_qubits + control_qubits_by_case
if parking_qubits:
all_qubits += parking_qubits
# check the lutman of the target, control and parking qubits for cw_27,
# which is needed for refocusing, case preparation, and preparation in 1 (respectively)
# and prepare if necessary
for qb in all_qubits:
mw_lutman = self.find_instrument(qb).instr_LutMan_MW.get_instr()
xm180_dict = {"name": "rXm180", "theta": -180, "phi": 0, "type": "ge"}
if mw_lutman.LutMap().get(27) != xm180_dict:
print(f"{mw_lutman.name} does not have refocusing pulse, overriding cw_27..")
mw_lutman.LutMap()[27] = xm180_dict
mw_lutman.load_waveform_onto_AWG_lookuptable(27, regenerate_waveforms=True)
for i, qb in enumerate(target_qubits):
mw_lutman = self.find_instrument(qb).instr_LutMan_MW.get_instr()
# load_phase_pulses already uploads all waveforms inside
mw_lutman.load_phase_pulses_to_AWG_lookuptable(
phases=np.arange(0, 360, 20) + phase_offsets[i] if phase_offsets else np.arange(0, 360, 20))
if prepare_for_timedomain:
# To preserve readout (feedline/UHF) order in preparation!
qubits_by_feedline = [['D1', 'X1'],
['D2', 'Z1', 'D3', 'D4', 'D5', 'D7', 'X2', 'X3', 'Z3'],
['D6', 'D8', 'D9', 'X4', 'Z2', 'Z4']]
all_qubits_sorted = sorted(all_qubits,
key=lambda x: [i for i, qubits in enumerate(qubits_by_feedline) if x in qubits])
log.info(f"Sorted preparation qubits: {all_qubits_sorted}")
self.prepare_for_timedomain(qubits=all_qubits_sorted)
# These are hardcoded angles in the mw_lutman for the AWG8
# only x2 and x3 downsample_swp_points available
angles = np.arange(0, 341, 20 * downsample_angle_points)
# prepare flux codeword list according to given step numbers and refocusing flag
# will be programmed in order of the list, but scheduled in parallel (if possible)
flux_cw_list = [flux_codeword + f'-{step}-refocus' if refocusing else flux_codeword + f'-{step}'
for step in flux_dance_steps]
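# e.g. flux_codeword='flux-dance', flux_dance_steps=[1, 2] gives
# ['flux-dance-1', 'flux-dance-2'] (or ['flux-dance-1-refocus', ...] when refocusing=True)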
p = mqo.parity_check_flux_dance(
Q_idxs_target=Q_idxs_target,
Q_idxs_control=Q_idxs_control,
control_cases=cases,
flux_cw_list=flux_cw_list,
Q_idxs_ramsey=Q_idxs_ramsey if ramsey_qubits else None,
Q_idxs_parking=Q_idxs_parking if parking_qubits else None,
nr_flux_dance_before_cal_points=nr_flux_dance_before_cal_points,
platf_cfg=self.cfg_openql_platform_fn(),
angles=angles,
initialization_msmt=initialization_msmt,
wait_time_before_flux=wait_time_before_flux_ns,
wait_time_after_flux=wait_time_after_flux_ns
)
s = swf.OpenQL_Sweep(
openql_program=p,
CCL=self.instr_CC.get_instr(),
parameter_name="Cases",
unit="a.u."
)
d = self.get_int_avg_det(qubits=target_qubits + control_qubits)
MC.set_sweep_function(s)
MC.set_sweep_points(p.sweep_points)
MC.set_detector_function(d)
label = f"Parity_check_flux_dance_{target_qubits}_{control_qubits_by_case}_{self.msmt_suffix}_{label_suffix}"
MC.run(label, disable_snapshot_metadata=disable_metadata)
a = ma2.Parity_Check_Analysis(
label=label,
ancilla_qubits=target_qubits,
data_qubits=control_qubits_by_case,
parking_qubits=parking_qubits,
cases=cases,
plotting=plotting
)
return a.result
def measure_parity_check_fidelity(
self,
target_qubits: list,
control_qubits: list, # have to be given in readout (feedline) order
flux_dance_steps: List[int] = [1, 2, 3, 4],
flux_codeword: str = 'flux-dance',
ramsey_qubits: list = None,
refocusing: bool = False,
phase_offsets: list = None,
cases_to_measure: list = None,
result_logging_mode='raw',
prepare_for_timedomain=True,
initialization_msmt: bool = True,
nr_shots_per_case: int = 2 ** 14,
shots_per_meas: int = 2 ** 16,
wait_time_before_flux_ns: int = 0,
wait_time_after_flux_ns: int = 0,
label_suffix: str = "",
disable_metadata: bool = False,
MC=None,
):
"""
Measures a parity check fidelity. In this experiment the conditional phase
in the two-qubit Cphase gate is measured using a Ramsey-like sequence.
Specifically, qubit q0 of each pair is prepared in a superposition, while q1 is in the 0 or 1 state.
Next the flux pulse is applied. Finally a pi/2 after-rotation around various axes
is applied to q0, and q1 is flipped back (if necessary) to the 0 state.
Plotting the probabilities of the zero state for each qubit as a function of
the after-rotation axis angle, and comparing the cases of q1 in the 0 or 1 state, makes it
possible to measure the conditional phase and estimate the leakage of the Cphase gate.
Args:
target_qubits, control_qubits (list of str):
target_qubits are the ancillas of the parity check; control_qubits are the
data qubits and must be given in readout (feedline) order.
prepare_for_timedomain (bool):
should the instruments be reconfigured for time domain measurement
disable_cz (bool):
execute the experiment with no flux pulse applied
disabled_cz_duration_ns (int):
waiting time to emulate the flux pulse
wait_time_before_flux_ns (int):
additional waiting time (in ns) before the flux pulse.
wait_time_after_flux_ns (int):
additional waiting time (in ns) after the flux pulse, before
the final after-rotations
"""
if self.ro_acq_weight_type() != 'optimal':
# this occurs because the detector groups qubits per feedline.
# If you do not pay attention, this will mess up the analysis of
# this experiment.
raise ValueError('Current conditional analysis is not working with {}'.format(self.ro_acq_weight_type()))
if MC is None:
MC = self.instr_MC.get_instr()
Q_idxs_ancilla = []
for i, ancilla in enumerate(target_qubits):
log.info(f"Parity {ancilla} - {control_qubits}")
assert ancilla in self.qubits()
assert all([Q in self.qubits() for Q in control_qubits])
Q_idxs_ancilla += [self.find_instrument(ancilla).cfg_qubit_nr()]
Q_idxs_ramsey = []
if ramsey_qubits:
for i, qb in enumerate(ramsey_qubits):
assert qb in self.qubits()
if qb in target_qubits:
log.warning(f"Ramsey qubit {qb} already given as ancilla qubit!")
Q_idxs_ramsey += [self.find_instrument(qb).cfg_qubit_nr()]
Q_idxs_data = []
Q_idxs_data += [self.find_instrument(Q).cfg_qubit_nr() for Q in control_qubits]
cases = ['{:0{}b}'.format(i, len(Q_idxs_data)) for i in range(2 ** len(Q_idxs_data))]
if initialization_msmt:
nr_shots = 2 * nr_shots_per_case * len(cases)
label_suffix = '_'.join([label_suffix, "init-msmt"])
else:
nr_shots = nr_shots_per_case * len(cases)
self.ro_acq_digitized(False)
if prepare_for_timedomain:
self.prepare_for_timedomain(qubits=target_qubits + control_qubits)
for i, qb in enumerate(target_qubits):
mw_lutman = self.find_instrument(qb).instr_LutMan_MW.get_instr()
# load_phase_pulses already uploads all waveforms inside
mw_lutman.load_phase_pulses_to_AWG_lookuptable(
phases=np.arange(0, 360, 20) + phase_offsets[i] if phase_offsets else np.arange(0, 360, 20))
# prepare flux codeword list according to given step numbers and refocusing flag
# will be programmed in order of the list, but scheduled in parallel (if possible)
flux_cw_list = [flux_codeword + f'-{step}-refocus' if refocusing else flux_codeword + f'-{step}'
for step in flux_dance_steps]
p = mqo.parity_check_fidelity(
Q_idxs_ancilla,
Q_idxs_data,
Q_idxs_ramsey,
control_cases=cases,
flux_cw_list=flux_cw_list,
refocusing=refocusing,
platf_cfg=self.cfg_openql_platform_fn(),
initialization_msmt=initialization_msmt,
wait_time_before_flux=wait_time_before_flux_ns,
wait_time_after_flux=wait_time_after_flux_ns
)
s = swf.OpenQL_Sweep(openql_program=p, CCL=self.instr_CC.get_instr())
MC.set_sweep_function(s)
MC.set_sweep_points(np.arange(nr_shots))
d = self.get_int_logging_detector(
qubits=target_qubits + control_qubits,
result_logging_mode=result_logging_mode
)
shots_per_meas = int(np.floor(np.min([shots_per_meas, nr_shots])
/ len(cases))
* len(cases)
)
d.set_child_attr("nr_shots", shots_per_meas)
MC.set_detector_function(d)
# disable live plotting and soft averages
old_soft_avg = MC.soft_avg()
old_live_plot_enabled = MC.live_plot_enabled()
MC.soft_avg(1)
MC.live_plot_enabled(False)
label = f"Parity_check_fidelity_{target_qubits}_{control_qubits}_{self.msmt_suffix}_{label_suffix}"
MC.run(label, disable_snapshot_metadata=disable_metadata)
MC.soft_avg(old_soft_avg)
MC.live_plot_enabled(old_live_plot_enabled)
return True
# def measure_phase_corrections(
# self,
# target_qubits: List[str],
# control_qubits: List[str],
# flux_codeword: str="cz",
# measure_switched_target: bool=True,
# update: bool = True,
# prepare_for_timedomain=True,
# disable_cz: bool = False,
# disabled_cz_duration_ns: int = 60,
# cz_repetitions: int = 1,
# wait_time_before_flux_ns: int = 0,
# wait_time_after_flux_ns: int = 0,
# label="",
# verbose=True,
# extract_only=False,
# ):
# assert all(qb in self.qubits() for control_qubits + target_qubits)
# for q_target, q_control in zip(target_qubits, control_qubits):
# a = self.measure_conditional_oscillation(
# q_target,
# q_control,
# prepare_for_timedomain=prepare_for_timedomain
# extract_only=extract_only
# )
# if measure_switched_target:
# for q_target, q_control in zip(control_qubits, target_qubits):
# a = self.measure_conditional_oscillation(
# q_target,
# q_control,
# prepare_for_timedomain=prepare_for_timedomain
# extract_only=extract_only
# )
# for qb in target_qubits:
# mw_lutman = self.find_instrument(qb).instr_LutMan_MW.get_instr()
# return self
def measure_two_qubit_grovers_repeated(
self,
qubits: list,
nr_of_grover_iterations=40,
prepare_for_timedomain=True,
MC=None,
):
if prepare_for_timedomain:
self.prepare_for_timedomain()
if MC is None:
MC = self.instr_MC.get_instr()
for q in qubits:
assert q in self.qubits()
q0idx = self.find_instrument(qubits[-1]).cfg_qubit_nr()
q1idx = self.find_instrument(qubits[-2]).cfg_qubit_nr()
p = mqo.grovers_two_qubits_repeated(
qubits=[q1idx, q0idx],
nr_of_grover_iterations=nr_of_grover_iterations,
platf_cfg=self.cfg_openql_platform_fn(),
)
s = swf.OpenQL_Sweep(openql_program=p, CCL=self.instr_CC.get_instr())
d = self.get_correlation_detector()
MC.set_sweep_function(s)
MC.set_sweep_points(np.arange(nr_of_grover_iterations))
MC.set_detector_function(d)
MC.run(
"Grovers_two_qubit_repeated_{}_{}{}".format(
qubits[-2], qubits[-1], self.msmt_suffix
)
)
a = ma.MeasurementAnalysis()
return a
def measure_two_qubit_tomo_bell(
self,
qubits: list,
bell_state=0,
wait_after_flux=None,
analyze=True,
close_fig=True,
prepare_for_timedomain=True,
MC=None,
label="",
shots_logging: bool = False,
shots_per_meas=2 ** 16,
flux_codeword="cz"
):
"""
Prepares and performs a tomography of the one of the bell states, indicated
by its index.
Args:
bell_state (int):
index of prepared bell state
0 -> |Phi_m>=|00>-|11>
1 -> |Phi_p>=|00>+|11>
2 -> |Psi_m>=|01>-|10>
3 -> |Psi_p>=|01>+|10>
qubits (list):
list of names of the target qubits
wait_after_flux (float):
wait time (in seconds) after the flux pulse and
after-rotation before tomographic rotations
shots_logging (bool):
if False uses correlation mode to acquire shots for tomography.
if True uses single shot mode to acquire shots.
"""
q0 = qubits[0]
q1 = qubits[1]
if prepare_for_timedomain:
self.prepare_for_timedomain(qubits=[q0, q1])
if MC is None:
MC = self.instr_MC.get_instr()
assert q0 in self.qubits()
assert | |
import copy
import logging
import os
import re
import subprocess
import tempfile
import uuid
from collections import OrderedDict
from functools import partial
from io import BytesIO
import bleach
from django.conf import settings
from django.contrib.staticfiles import finders
from django.dispatch import receiver
from django.utils.formats import date_format
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from PyPDF2 import PdfFileReader
from pytz import timezone
from reportlab.graphics import renderPDF
from reportlab.graphics.barcode.qr import QrCodeWidget
from reportlab.graphics.shapes import Drawing
from reportlab.lib.colors import Color
from reportlab.lib.enums import TA_CENTER, TA_LEFT, TA_RIGHT
from reportlab.lib.styles import ParagraphStyle
from reportlab.lib.units import mm
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.pdfmetrics import getAscentDescent
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfgen.canvas import Canvas
from reportlab.platypus import Paragraph
from pretix.base.invoice import ThumbnailingImageReader
from pretix.base.models import Order, OrderPosition
from pretix.base.settings import PERSON_NAME_SCHEMES
from pretix.base.signals import layout_text_variables
from pretix.base.templatetags.money import money_filter
from pretix.presale.style import get_fonts
logger = logging.getLogger(__name__)
DEFAULT_VARIABLES = OrderedDict((
("secret", {
"label": _("Ticket code (barcode content)"),
"editor_sample": "tdmruoekvkpbv1o2mv8xccvqcikvr58u",
"evaluate": lambda orderposition, order, event: orderposition.secret
}),
("order", {
"label": _("Order code"),
"editor_sample": "A1B2C",
"evaluate": lambda orderposition, order, event: orderposition.order.code
}),
("item", {
"label": _("Product name"),
"editor_sample": _("Sample product"),
"evaluate": lambda orderposition, order, event: str(orderposition.item.name)
}),
("variation", {
"label": _("Variation name"),
"editor_sample": _("Sample variation"),
"evaluate": lambda op, order, event: str(op.variation) if op.variation else ''
}),
("item_description", {
"label": _("Product description"),
"editor_sample": _("Sample product description"),
"evaluate": lambda orderposition, order, event: str(orderposition.item.description)
}),
("itemvar", {
"label": _("Product name and variation"),
"editor_sample": _("Sample product – sample variation"),
"evaluate": lambda orderposition, order, event: (
'{} - {}'.format(orderposition.item.name, orderposition.variation)
if orderposition.variation else str(orderposition.item.name)
)
}),
("item_category", {
"label": _("Product category"),
"editor_sample": _("Ticket category"),
"evaluate": lambda orderposition, order, event: (
str(orderposition.item.category.name) if orderposition.item.category else ""
)
}),
("price", {
"label": _("Price"),
"editor_sample": _("123.45 EUR"),
"evaluate": lambda op, order, event: money_filter(op.price, event.currency)
}),
("price_with_addons", {
"label": _("Price including add-ons"),
"editor_sample": _("123.45 EUR"),
"evaluate": lambda op, order, event: money_filter(op.price + sum(
p.price
for p in op.addons.all()
if not p.canceled
), event.currency)
}),
("attendee_name", {
"label": _("Attendee name"),
"editor_sample": _("<NAME>"),
"evaluate": lambda op, order, ev: op.attendee_name or (op.addon_to.attendee_name if op.addon_to else '')
}),
("event_name", {
"label": _("Event name"),
"editor_sample": _("Sample event name"),
"evaluate": lambda op, order, ev: str(ev.name)
}),
("event_date", {
"label": _("Event date"),
"editor_sample": _("May 31st, 2017"),
"evaluate": lambda op, order, ev: ev.get_date_from_display(show_times=False)
}),
("event_date_range", {
"label": _("Event date range"),
"editor_sample": _("May 31st – June 4th, 2017"),
"evaluate": lambda op, order, ev: ev.get_date_range_display(force_show_end=True)
}),
("event_begin", {
"label": _("Event begin date and time"),
"editor_sample": _("2017-05-31 20:00"),
"evaluate": lambda op, order, ev: date_format(
ev.date_from.astimezone(timezone(ev.settings.timezone)),
"SHORT_DATETIME_FORMAT"
) if ev.date_from else ""
}),
("event_begin_date", {
"label": _("Event begin date"),
"editor_sample": _("2017-05-31"),
"evaluate": lambda op, order, ev: date_format(
ev.date_from.astimezone(timezone(ev.settings.timezone)),
"SHORT_DATE_FORMAT"
) if ev.date_from else ""
}),
("event_begin_time", {
"label": _("Event begin time"),
"editor_sample": _("20:00"),
"evaluate": lambda op, order, ev: ev.get_time_from_display()
}),
("event_end", {
"label": _("Event end date and time"),
"editor_sample": _("2017-05-31 22:00"),
"evaluate": lambda op, order, ev: date_format(
ev.date_to.astimezone(timezone(ev.settings.timezone)),
"SHORT_DATETIME_FORMAT"
) if ev.date_to else ""
}),
("event_end_date", {
"label": _("Event end date"),
"editor_sample": _("2017-05-31"),
"evaluate": lambda op, order, ev: date_format(
ev.date_to.astimezone(timezone(ev.settings.timezone)),
"SHORT_DATE_FORMAT"
) if ev.date_to else ""
}),
("event_end_time", {
"label": _("Event end time"),
"editor_sample": _("22:00"),
"evaluate": lambda op, order, ev: date_format(
ev.date_to.astimezone(timezone(ev.settings.timezone)),
"TIME_FORMAT"
) if ev.date_to else ""
}),
("event_admission", {
"label": _("Event admission date and time"),
"editor_sample": _("2017-05-31 19:00"),
"evaluate": lambda op, order, ev: date_format(
ev.date_admission.astimezone(timezone(ev.settings.timezone)),
"SHORT_DATETIME_FORMAT"
) if ev.date_admission else ""
}),
("event_admission_time", {
"label": _("Event admission time"),
"editor_sample": _("19:00"),
"evaluate": lambda op, order, ev: date_format(
ev.date_admission.astimezone(timezone(ev.settings.timezone)),
"TIME_FORMAT"
) if ev.date_admission else ""
}),
("event_location", {
"label": _("Event location"),
"editor_sample": _("Random City"),
"evaluate": lambda op, order, ev: str(ev.location).replace("\n", "<br/>\n")
}),
("invoice_name", {
"label": _("Invoice address name"),
"editor_sample": _("<NAME>"),
"evaluate": lambda op, order, ev: order.invoice_address.name if getattr(order, 'invoice_address', None) else ''
}),
("invoice_company", {
"label": _("Invoice address company"),
"editor_sample": _("Sample company"),
"evaluate": lambda op, order, ev: order.invoice_address.company if getattr(order, 'invoice_address', None) else ''
}),
("addons", {
"label": _("List of Add-Ons"),
"editor_sample": _("Addon 1\nAddon 2"),
"evaluate": lambda op, order, ev: "<br/>".join([
'{} - {}'.format(p.item, p.variation) if p.variation else str(p.item)
for p in (
op.addons.all() if 'addons' in getattr(op, '_prefetched_objects_cache', {})
else op.addons.select_related('item', 'variation')
)
if not p.canceled
])
}),
("organizer", {
"label": _("Organizer name"),
"editor_sample": _("Event organizer company"),
"evaluate": lambda op, order, ev: str(order.event.organizer.name)
}),
("organizer_info_text", {
"label": _("Organizer info text"),
"editor_sample": _("Event organizer info text"),
"evaluate": lambda op, order, ev: str(order.event.settings.organizer_info_text)
}),
("now_date", {
"label": _("Printing date"),
"editor_sample": _("2017-05-31"),
"evaluate": lambda op, order, ev: date_format(
now().astimezone(timezone(ev.settings.timezone)),
"SHORT_DATE_FORMAT"
)
}),
("now_datetime", {
"label": _("Printing date and time"),
"editor_sample": _("2017-05-31 19:00"),
"evaluate": lambda op, order, ev: date_format(
now().astimezone(timezone(ev.settings.timezone)),
"SHORT_DATETIME_FORMAT"
)
}),
("now_time", {
"label": _("Printing time"),
"editor_sample": _("19:00"),
"evaluate": lambda op, order, ev: date_format(
now().astimezone(timezone(ev.settings.timezone)),
"TIME_FORMAT"
)
}),
("seat", {
"label": _("Seat: Full name"),
"editor_sample": _("Ground floor, Row 3, Seat 4"),
"evaluate": lambda op, order, ev: str(op.seat if op.seat else
_('General admission') if ev.seating_plan_id is not None else "")
}),
("seat_zone", {
"label": _("Seat: zone"),
"editor_sample": _("Ground floor"),
"evaluate": lambda op, order, ev: str(op.seat.zone_name if op.seat else
_('General admission') if ev.seating_plan_id is not None else "")
}),
("seat_row", {
"label": _("Seat: row"),
"editor_sample": "3",
"evaluate": lambda op, order, ev: str(op.seat.row_name if op.seat else "")
}),
("seat_number", {
"label": _("Seat: seat number"),
"editor_sample": 4,
"evaluate": lambda op, order, ev: str(op.seat.seat_number if op.seat else "")
}),
))
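# Minimal sketch of how an entry above is typically resolved when rendering a ticket
# (the chosen variable and the return value are illustrative):
# entry = DEFAULT_VARIABLES["order"]
# text = entry["evaluate"](orderposition, order, event)   # -> e.g. "A1B2C"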
@receiver(layout_text_variables, dispatch_uid="pretix_base_layout_text_variables_questions")
def variables_from_questions(sender, *args, **kwargs):
def get_answer(op, order, event, question_id):
a = None
if op.addon_to:
if 'answers' in getattr(op.addon_to, '_prefetched_objects_cache', {}):
try:
a = [a for a in op.addon_to.answers.all() if a.question_id == question_id][0]
except IndexError:
pass
else:
a = op.addon_to.answers.filter(question_id=question_id).first()
if 'answers' in getattr(op, '_prefetched_objects_cache', {}):
try:
a = [a for a in op.answers.all() if a.question_id == question_id][0]
except IndexError:
pass
else:
a = op.answers.filter(question_id=question_id).first()
if not a:
return ""
else:
return str(a).replace("\n", "<br/>\n")
d = {}
for q in sender.questions.all():
d['question_{}'.format(q.pk)] = {
'label': _('Question: {question}').format(question=q.question),
'editor_sample': _('<Answer: {question}>').format(question=q.question),
'evaluate': partial(get_answer, question_id=q.pk)
}
return d
def _get_attendee_name_part(key, op, order, ev):
return op.attendee_name_parts.get(key, '')
def _get_ia_name_part(key, op, order, ev):
return order.invoice_address.name_parts.get(key, '') if getattr(order, 'invoice_address', None) else ''
def get_variables(event):
v = copy.copy(DEFAULT_VARIABLES)
scheme = PERSON_NAME_SCHEMES[event.settings.name_scheme]
for key, label, weight in scheme['fields']:
v['attendee_name_%s' % key] = {
'label': _("Attendee name: {part}").format(part=label),
'editor_sample': scheme['sample'][key],
'evaluate': partial(_get_attendee_name_part, key)
}
v['invoice_name']['editor_sample'] = scheme['concatenation'](scheme['sample'])
v['attendee_name']['editor_sample'] = scheme['concatenation'](scheme['sample'])
for key, label, weight in scheme['fields']:
v['invoice_name_%s' % key] = {
'label': _("Invoice address name: {part}").format(part=label),
'editor_sample': scheme['sample'][key],
"evaluate": partial(_get_ia_name_part, key)
}
for recv, res in layout_text_variables.send(sender=event):
v.update(res)
return v
class Renderer:
def __init__(self, event, layout, background_file):
self.layout = layout
self.background_file = background_file
self.variables = get_variables(event)
if self.background_file:
self.bg_bytes = self.background_file.read()
self.bg_pdf = PdfFileReader(BytesIO(self.bg_bytes), strict=False)
else:
self.bg_bytes = None
self.bg_pdf = None
@classmethod
def _register_fonts(cls):
pdfmetrics.registerFont(TTFont('Open Sans', finders.find('fonts/OpenSans-Regular.ttf')))
pdfmetrics.registerFont(TTFont('Open Sans I', finders.find('fonts/OpenSans-Italic.ttf')))
pdfmetrics.registerFont(TTFont('Open Sans B', finders.find('fonts/OpenSans-Bold.ttf')))
pdfmetrics.registerFont(TTFont('Open Sans B I', finders.find('fonts/OpenSans-BoldItalic.ttf')))
for family, styles in get_fonts().items():
pdfmetrics.registerFont(TTFont(family, finders.find(styles['regular']['truetype'])))
if 'italic' in styles:
pdfmetrics.registerFont(TTFont(family + ' I', finders.find(styles['italic']['truetype'])))
if 'bold' in styles:
pdfmetrics.registerFont(TTFont(family + ' B', finders.find(styles['bold']['truetype'])))
if 'bolditalic' in styles:
pdfmetrics.registerFont(TTFont(family + ' B I', finders.find(styles['bolditalic']['truetype'])))
def _draw_poweredby(self, canvas: Canvas, op: OrderPosition, o: dict):
content = o.get('content', 'dark')
if content not in ('dark', 'white'):
content = 'dark'
img = finders.find('pretixpresale/pdf/powered_by_pretix_{}.png'.format(content))
ir = ThumbnailingImageReader(img)
try:
width, height = ir.resize(None, float(o['size']) * mm, 300)
except:
logger.exception("Can not resize image")
pass
canvas.drawImage(ir,
float(o['left']) * mm, float(o['bottom']) * mm,
width=width, height=height,
preserveAspectRatio=True, anchor='n',
mask='auto')
def _draw_barcodearea(self, canvas: Canvas, op: OrderPosition, o: dict):
content = o.get('content', 'secret')
if content == 'secret':
content = op.secret
elif content == 'pseudonymization_id':
content = op.pseudonymization_id
reqs = float(o['size']) * mm
qrw = QrCodeWidget(content, barLevel='H', barHeight=reqs, barWidth=reqs)
d = Drawing(reqs, reqs)
d.add(qrw)
qr_x = float(o['left']) * mm
qr_y = float(o['bottom']) * mm
renderPDF.draw(d, canvas, qr_x, qr_y)
def _get_ev(self, op, order):
return op.subevent or order.event
def _get_text_content(self, op: OrderPosition, order: Order, o: dict):
ev = self._get_ev(op, order)
if not o['content']:
return '(error)'
if o['content'] == 'other':
return o['text'].replace("\n", "<br/>\n")
elif o['content'].startswith('meta:'):
return ev.meta_data.get(o['content'][5:]) or ''
elif o['content'] in self.variables:
try:
return self.variables[o['content']]['evaluate'](op, order, ev)
except:
logger.exception('Failed to process variable.')
return '(error)'
return ''
def _draw_textarea(self, canvas: Canvas, op: OrderPosition, order: Order, o: dict):
font = o['fontfamily']
if o['bold']:
font += ' B'
if o['italic']:
font += ' I'
align_map = {
'left': TA_LEFT,
'center': | |
unless the user or an admin has
# deactivated their account. The name comes from Django; this field
# isn't related to presence or to whether the user has recently used Zulip.
#
# See also `long_term_idle`.
is_active: bool = models.BooleanField(default=True, db_index=True)
is_billing_admin: bool = models.BooleanField(default=False, db_index=True)
is_bot: bool = models.BooleanField(default=False, db_index=True)
bot_type: Optional[int] = models.PositiveSmallIntegerField(null=True, db_index=True)
bot_owner: Optional["UserProfile"] = models.ForeignKey(
"self", null=True, on_delete=models.SET_NULL
)
# Each role has a superset of the permissions of the next higher
# numbered role. When adding new roles, leave enough space for
# future roles to be inserted between currently adjacent
# roles. These constants appear in RealmAuditLog.extra_data, so
# changes to them will require a migration of RealmAuditLog.
ROLE_REALM_OWNER = 100
ROLE_REALM_ADMINISTRATOR = 200
ROLE_MODERATOR = 300
ROLE_MEMBER = 400
ROLE_GUEST = 600
role: int = models.PositiveSmallIntegerField(default=ROLE_MEMBER, db_index=True)
ROLE_TYPES = [
ROLE_REALM_OWNER,
ROLE_REALM_ADMINISTRATOR,
ROLE_MODERATOR,
ROLE_MEMBER,
ROLE_GUEST,
]
# Whether the user has been "soft-deactivated" due to weeks of inactivity.
# For these users we avoid doing UserMessage table work, as an optimization
# for large Zulip organizations with lots of single-visit users.
long_term_idle: bool = models.BooleanField(default=False, db_index=True)
# When we last added basic UserMessage rows for a long_term_idle user.
last_active_message_id: Optional[int] = models.IntegerField(null=True)
# Mirror dummies are fake (!is_active) users used to provide
# message senders in our cross-protocol Zephyr<->Zulip content
# mirroring integration, so that we can display mirrored content
# like native Zulip messages (with a name + avatar, etc.).
is_mirror_dummy: bool = models.BooleanField(default=False)
# Users with this flag set are allowed to forge messages as sent by another
# user and to send to private streams; also used for Zephyr/Jabber mirroring.
can_forge_sender: bool = models.BooleanField(default=False, db_index=True)
# Users with this flag set can create other users via API.
can_create_users: bool = models.BooleanField(default=False, db_index=True)
# Used for rate-limiting certain automated messages generated by bots
last_reminder: Optional[datetime.datetime] = models.DateTimeField(default=None, null=True)
# Minutes to wait before warning a bot owner that their bot sent a message
# to a nonexistent stream
BOT_OWNER_STREAM_ALERT_WAITPERIOD = 1
# API rate limits, formatted as a comma-separated list of range:max pairs
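# Illustrative example of the format: "3600:5000,60:200" (window:max pairs; values are made up).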
rate_limits: str = models.CharField(default="", max_length=100)
# Default streams for some deprecated/legacy classes of bot users.
default_sending_stream: Optional["Stream"] = models.ForeignKey(
"zerver.Stream",
null=True,
related_name="+",
on_delete=models.SET_NULL,
)
default_events_register_stream: Optional["Stream"] = models.ForeignKey(
"zerver.Stream",
null=True,
related_name="+",
on_delete=models.SET_NULL,
)
default_all_public_streams: bool = models.BooleanField(default=False)
# A timezone name from the `tzdata` database, as found in pytz.all_timezones.
#
# The longest existing name is 32 characters long, so max_length=40 seems
# like a safe choice.
#
# In Django, the convention is to use an empty string instead of NULL/None
# for text-based fields. For more information, see
# https://docs.djangoproject.com/en/3.2/ref/models/fields/#django.db.models.Field.null.
timezone: str = models.CharField(max_length=40, default="")
AVATAR_FROM_GRAVATAR = "G"
AVATAR_FROM_USER = "U"
AVATAR_SOURCES = (
(AVATAR_FROM_GRAVATAR, "Hosted by Gravatar"),
(AVATAR_FROM_USER, "Uploaded by user"),
)
avatar_source: str = models.CharField(
default=AVATAR_FROM_GRAVATAR, choices=AVATAR_SOURCES, max_length=1
)
avatar_version: int = models.PositiveSmallIntegerField(default=1)
avatar_hash: Optional[str] = models.CharField(null=True, max_length=64)
TUTORIAL_WAITING = "W"
TUTORIAL_STARTED = "S"
TUTORIAL_FINISHED = "F"
TUTORIAL_STATES = (
(TUTORIAL_WAITING, "Waiting"),
(TUTORIAL_STARTED, "Started"),
(TUTORIAL_FINISHED, "Finished"),
)
tutorial_status: str = models.CharField(
default=TUTORIAL_WAITING, choices=TUTORIAL_STATES, max_length=1
)
# Contains serialized JSON of the form:
# [("step 1", true), ("step 2", false)]
# where the second element of each tuple indicates whether the step has been
# completed.
onboarding_steps: str = models.TextField(default="[]")
zoom_token: Optional[object] = models.JSONField(default=None, null=True)
objects: UserManager = UserManager()
ROLE_ID_TO_NAME_MAP = {
ROLE_REALM_OWNER: gettext_lazy("Organization owner"),
ROLE_REALM_ADMINISTRATOR: gettext_lazy("Organization administrator"),
ROLE_MODERATOR: gettext_lazy("Moderator"),
ROLE_MEMBER: gettext_lazy("Member"),
ROLE_GUEST: gettext_lazy("Guest"),
}
def get_role_name(self) -> str:
return self.ROLE_ID_TO_NAME_MAP[self.role]
def profile_data(self) -> ProfileData:
values = CustomProfileFieldValue.objects.filter(user_profile=self)
user_data = {
v.field_id: {"value": v.value, "rendered_value": v.rendered_value} for v in values
}
data: ProfileData = []
for field in custom_profile_fields_for_realm(self.realm_id):
field_values = user_data.get(field.id, None)
if field_values:
value, rendered_value = field_values.get("value"), field_values.get(
"rendered_value"
)
else:
value, rendered_value = None, None
field_type = field.field_type
if value is not None:
converter = field.FIELD_CONVERTERS[field_type]
value = converter(value)
field_data = field.as_dict()
data.append(
{
"id": field_data["id"],
"name": field_data["name"],
"type": field_data["type"],
"hint": field_data["hint"],
"field_data": field_data["field_data"],
"order": field_data["order"],
"value": value,
"rendered_value": rendered_value,
}
)
return data
def can_admin_user(self, target_user: "UserProfile") -> bool:
"""Returns whether this user has permission to modify target_user"""
if target_user.bot_owner == self:
return True
elif self.is_realm_admin and self.realm == target_user.realm:
return True
else:
return False
def __str__(self) -> str:
return f"<UserProfile: {self.email} {self.realm}>"
@property
def is_provisional_member(self) -> bool:
if self.is_moderator:
return False
diff = (timezone_now() - self.date_joined).days
if diff < self.realm.waiting_period_threshold:
return True
return False
@property
def is_realm_admin(self) -> bool:
return (
self.role == UserProfile.ROLE_REALM_ADMINISTRATOR
or self.role == UserProfile.ROLE_REALM_OWNER
)
@is_realm_admin.setter
def is_realm_admin(self, value: bool) -> None:
if value:
self.role = UserProfile.ROLE_REALM_ADMINISTRATOR
elif self.role == UserProfile.ROLE_REALM_ADMINISTRATOR:
# We need to be careful to not accidentally change
# ROLE_GUEST to ROLE_MEMBER here.
self.role = UserProfile.ROLE_MEMBER
@property
def has_billing_access(self) -> bool:
return self.is_realm_owner or self.is_billing_admin
@property
def is_realm_owner(self) -> bool:
return self.role == UserProfile.ROLE_REALM_OWNER
@is_realm_owner.setter
def is_realm_owner(self, value: bool) -> None:
if value:
self.role = UserProfile.ROLE_REALM_OWNER
elif self.role == UserProfile.ROLE_REALM_OWNER:
# We need to be careful to not accidentally change
# ROLE_GUEST to ROLE_MEMBER here.
self.role = UserProfile.ROLE_MEMBER
@property
def is_guest(self) -> bool:
return self.role == UserProfile.ROLE_GUEST
@is_guest.setter
def is_guest(self, value: bool) -> None:
if value:
self.role = UserProfile.ROLE_GUEST
elif self.role == UserProfile.ROLE_GUEST:
# We need to be careful to not accidentally change
# ROLE_REALM_ADMINISTRATOR to ROLE_MEMBER here.
self.role = UserProfile.ROLE_MEMBER
@property
def is_moderator(self) -> bool:
return self.role == UserProfile.ROLE_MODERATOR
@is_moderator.setter
def is_moderator(self, value: bool) -> None:
if value:
self.role = UserProfile.ROLE_MODERATOR
elif self.role == UserProfile.ROLE_MODERATOR:
# We need to be careful to not accidentally change
# ROLE_GUEST to ROLE_MEMBER here.
self.role = UserProfile.ROLE_MEMBER
@property
def is_incoming_webhook(self) -> bool:
return self.bot_type == UserProfile.INCOMING_WEBHOOK_BOT
@property
def allowed_bot_types(self) -> List[int]:
allowed_bot_types = []
if (
self.is_realm_admin
or not self.realm.bot_creation_policy == Realm.BOT_CREATION_LIMIT_GENERIC_BOTS
):
allowed_bot_types.append(UserProfile.DEFAULT_BOT)
allowed_bot_types += [
UserProfile.INCOMING_WEBHOOK_BOT,
UserProfile.OUTGOING_WEBHOOK_BOT,
]
if settings.EMBEDDED_BOTS_ENABLED:
allowed_bot_types.append(UserProfile.EMBEDDED_BOT)
return allowed_bot_types
def email_address_is_realm_public(self) -> bool:
if self.realm.email_address_visibility == Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE:
return True
if self.is_bot:
return True
return False
def has_permission(self, policy_name: str) -> bool:
if policy_name not in [
"add_custom_emoji_policy",
"create_private_stream_policy",
"create_public_stream_policy",
"create_web_public_stream_policy",
"delete_own_message_policy",
"edit_topic_policy",
"invite_to_stream_policy",
"invite_to_realm_policy",
"move_messages_between_streams_policy",
"user_group_edit_policy",
]:
raise AssertionError("Invalid policy")
policy_value = getattr(self.realm, policy_name)
if policy_value == Realm.POLICY_NOBODY:
return False
if policy_value == Realm.POLICY_EVERYONE:
return True
if self.is_realm_owner:
return True
if policy_value == Realm.POLICY_OWNERS_ONLY:
return False
if self.is_realm_admin:
return True
if policy_value == Realm.POLICY_ADMINS_ONLY:
return False
if self.is_moderator:
return True
if policy_value == Realm.POLICY_MODERATORS_ONLY:
return False
if self.is_guest:
return False
if policy_value == Realm.POLICY_MEMBERS_ONLY:
return True
assert policy_value == Realm.POLICY_FULL_MEMBERS_ONLY
return not self.is_provisional_member
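# Hedged example of the cascade above: if realm.invite_to_realm_policy is
# Realm.POLICY_MODERATORS_ONLY, then owners, admins and moderators pass, while
# members, provisional members and guests do not:
# user_profile.has_permission("invite_to_realm_policy")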
def can_create_public_streams(self) -> bool:
return self.has_permission("create_public_stream_policy")
def can_create_private_streams(self) -> bool:
return self.has_permission("create_private_stream_policy")
def can_create_web_public_streams(self) -> bool:
if not self.realm.web_public_streams_enabled():
return False
return self.has_permission("create_web_public_stream_policy")
def can_subscribe_other_users(self) -> bool:
return self.has_permission("invite_to_stream_policy")
def can_invite_others_to_realm(self) -> bool:
return self.has_permission("invite_to_realm_policy")
def can_move_messages_between_streams(self) -> bool:
return self.has_permission("move_messages_between_streams_policy")
def can_edit_user_groups(self) -> bool:
return self.has_permission("user_group_edit_policy")
def can_edit_topic_of_any_message(self) -> bool:
return self.has_permission("edit_topic_policy")
def can_add_custom_emoji(self) -> bool:
return self.has_permission("add_custom_emoji_policy")
def can_delete_own_message(self) -> bool:
return self.has_permission("delete_own_message_policy")
def can_access_public_streams(self) -> bool:
return not (self.is_guest or self.realm.is_zephyr_mirror_realm)
def major_tos_version(self) -> int:
if self.tos_version is not None:
return int(self.tos_version.split(".")[0])
else:
return -1
def format_requestor_for_logs(self) -> str:
return "{}@{}".format(self.id, self.realm.string_id or "root")
def set_password(self, password: Optional[str]) -> None:
if password is None:
self.set_unusable_password()
return
from zproject.backends import check_password_strength
if not check_password_strength(password):
raise PasswordTooWeakError
super().set_password(password)
class PasswordTooWeakError(Exception):
pass
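# Worked example (illustrative only, not part of the original module): UserProfile.has_permission
# above is a strict cascade ordered by privilege. For a realm whose invite_to_realm_policy is
# Realm.POLICY_MODERATORS_ONLY, a plain (non-provisional) member falls through as follows:
#   POLICY_NOBODY?            no  -> continue
#   POLICY_EVERYONE?          no  -> continue
#   is_realm_owner?           no  -> continue
#   POLICY_OWNERS_ONLY?       no  -> continue
#   is_realm_admin?           no  -> continue
#   POLICY_ADMINS_ONLY?       no  -> continue
#   is_moderator?             no  -> continue
#   POLICY_MODERATORS_ONLY?   yes -> return False
# so only owners, administrators, and moderators pass for that policy value.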
class UserGroup(models.Model):
objects = CTEManager()
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
name: str = models.CharField(max_length=100)
direct_members: Manager = models.ManyToManyField(
UserProfile, through="UserGroupMembership", related_name="direct_groups"
)
direct_subgroups: Manager = models.ManyToManyField(
"self",
symmetrical=False,
through="GroupGroupMembership",
through_fields=("supergroup", "subgroup"),
related_name="direct_supergroups",
)
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
description: str = models.TextField(default="")
is_system_group: bool = models.BooleanField(default=False)
class Meta:
unique_together = (("realm", "name"),)
class UserGroupMembership(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
user_group: UserGroup = models.ForeignKey(UserGroup, on_delete=CASCADE, related_name="+")
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE, related_name="+")
class Meta:
unique_together = (("user_group", "user_profile"),)
class GroupGroupMembership(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
supergroup: UserGroup = models.ForeignKey(UserGroup, on_delete=CASCADE, related_name="+")
subgroup: UserGroup = models.ForeignKey(UserGroup, on_delete=CASCADE, related_name="+")
class Meta:
constraints = [
models.UniqueConstraint(
fields=["supergroup", "subgroup"], name="zerver_groupgroupmembership_uniq"
)
]
def remote_user_to_email(remote_user: str) -> str:
if settings.SSO_APPEND_DOMAIN is not None:
remote_user += "@" + settings.SSO_APPEND_DOMAIN
return remote_user
import logging
import os
import numpy as np
from openff.units import unit as pint_unit
from parmed.structure import Structure as ParmedStructureClass
from paprika.build.dummy import extract_dummy_atoms
from paprika.restraints.utils import get_bias_potential_type, parse_window
from paprika.utils import get_key, return_parmed_structure
logger = logging.getLogger(__name__)
_PI_ = np.pi
_plumed_unit_dict = {
pint_unit.kcal / pint_unit.mole: "kcal/mol",
pint_unit.kJ / pint_unit.mole: "kj/mol",
pint_unit.nanometer: "nm",
pint_unit.angstrom: "A",
pint_unit.picosecond: "ps",
pint_unit.femtosecond: "fs",
pint_unit.nanosecond: "ns",
}
class Plumed:
"""
This class converts restraints generated in `pAPRika` :class:`paprika.restraints.DAT_restraint` into `Plumed
<https://www.plumed.org/>`_ restraints.
.. note ::
The ``Plumed`` module is described in the reference below and the source code is available on Github
https://github.com/plumed/plumed2
`The PLUMED consortium. Promoting transparency and reproducibility in enhanced molecular simulations,
Nat. Methods 16, 670 (2019)`
.. todo::
possibly change this module to use the python wrapper of Plumed.
Examples
--------
>>> plumed = Plumed()
>>> plumed.file_name = 'plumed.dat'
>>> plumed.path = './windows'
>>> plumed.window_list = window_list
>>> plumed.restraint_list = restraint_list
>>> plumed.dump_to_file()
The commands above will write the restraints to ``windows/*/plumed.dat``, which contains the Plumed-style restraints
.. code-block::
UNITS LENGTH=A ENERGY=kcal/mol TIME=ns
# Collective variables
c1 : DISTANCE ATOMS=175,150 NOPBC
c2 : ANGLE ATOMS=176,175,150 NOPBC
c3 : ANGLE ATOMS=175,150,165 NOPBC
# Bias potential
RESTRAINT ARG=c7 AT= 6.000 KAPPA= 10.00
RESTRAINT ARG=c8 AT= 3.142 KAPPA= 200.00
RESTRAINT ARG=c9 AT= 3.142 KAPPA= 200.00
The positional restraints on the dummy atoms, however, are not added automatically. These restraints
can be added to ``windows/*/plumed.dat`` using the code below.
>>> for window in window_list:
>>> structure = pmd.load_file("topology.prmtop", "coordinates.rst7")
>>> plumed.add_dummy_atoms_to_file(structure, window)
This appends the file with the following
.. code-block::
# Dummy Atoms
dm1: POSITION ATOM=123 NOPBC
dm2: POSITION ATOM=124 NOPBC
dm3: POSITION ATOM=125 NOPBC
RESTRAINT ...
ARG=dm1.x,dm1.y,dm1.z,dm2.x,dm2.y,dm2.z,dm3.x,dm3.y,dm3.z,
AT=18.600,19.020,27.950,18.600,19.020,24.950,18.600,21.220,22.750,
KAPPA=100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,
LABEL=dummy
... RESTRAINT
"""
@property
def path(self):
"""
os.PathLike: The parent directory that contains the APR simulation windows.
"""
return self._path
@path.setter
def path(self, value: str):
self._path = value
@property
def file_name(self):
"""
str: The Plumed file name where the restraints will be written to.
"""
return self._file_name
@file_name.setter
def file_name(self, value: str):
self._file_name = value
@property
def window_list(self):
"""
list: The list of APR windows where the Plumed files will be stored.
"""
return self._window_list
@window_list.setter
def window_list(self, value: list):
self._window_list = value
@property
def restraint_list(self):
"""
list: The list of restraints to convert.
"""
return self._restraint_list
@restraint_list.setter
def restraint_list(self, value: list):
self._restraint_list = value
@property
def uses_legacy_k(self):
"""
bool: Option to specify whether the force constants parsed into ``DAT_restraint`` follow the
legacy (`AMBER`-style) convention.
.. note ::
`AMBER`-style force constants absorb the factor of 1/2 into the constant, whereas
`GROMACS`/`NAMD`-style force constants do not. Plumed follows the `GROMACS`/`NAMD`-style
convention for the force constant, as the equations below demonstrate.
.. math::
:nowrap:
$$
\\begin{eqnarray}
U_{Amber} & = & K (x-x_{0})^2 \\\\
U_{Plumed} & = & \\frac{1}{2} k (x-x_{0})^2 \\\\
\\therefore K_{Amber} & = & \\frac{1}{2} k_{Plumed}
\\end{eqnarray}
$$
i.e., if ``uses_legacy_k`` is set to True (the default), the force constants will be multiplied by 2.
"""
return self._uses_legacy_k
@uses_legacy_k.setter
def uses_legacy_k(self, value: bool):
self._uses_legacy_k = value
@property
def output_units(self):
"""
dict: Dictionary of ``pint.unit.Quantity`` units for the Plumed script. The dictionary requires the keys
``energy``, ``length``, and ``time``; each value is converted to the appropriate string in the output script.
The default units are {"energy": unit.kcal/unit.mole, "length": unit.angstrom, "time": unit.picosecond}.
The units supported are
_plumed_unit_dict = {
"energy": {
unit.kcal / unit.mole,
unit.kJ / unit.mole,
},
"length": {
unit.nanometer,
unit.angstrom,
},
"time": {
unit.picosecond,
unit.femtosecond,
unit.nanosecond,
}
}
"""
return self._output_units
@output_units.setter
def output_units(self, value: dict):
self._output_units = value
def __init__(self):
self._file_name = "plumed.dat"
self._restraint_list = None
self._window_list = None
self._path = "./"
self._uses_legacy_k = True
self.k_factor = 1.0
self._output_units = None
self.header_line = None
self.group_index = None
self.group_atoms = None
def _initialize(self):
# Set factor for spring constant
if self.uses_legacy_k:
self.k_factor = 2.0
# Check user-specified units
if self.output_units is None:
self.output_units = {
"energy": pint_unit.kcal / pint_unit.mole,
"length": pint_unit.angstrom,
"time": pint_unit.nanosecond,
}
_check_plumed_units(self.output_units)
# header line
self.header_line = (
f"UNITS LENGTH={_plumed_unit_dict[self.output_units['length']]} "
f"ENERGY={_plumed_unit_dict[self.output_units['energy']]} "
f"TIME={_plumed_unit_dict[self.output_units['time']]}"
)
def dump_to_file(self):
"""
Write the `Plumed`-style restraints to file.
"""
self._initialize()
# Loop over APR windows
for windows in self.window_list:
window, phase = parse_window(windows)
# Create the Plumed file for this window (truncating any existing one) and write the header line
with open(os.path.join(self.path, windows, self.file_name), "w") as file:
file.write(self.header_line + "\n")
cv_index = 1
cv_dict = {}
cv_lines = []
bias_lines = []
self.group_index = 1
self.group_atoms = {}
# Parse each restraint in the list
for restraint in self.restraint_list:
# Skip restraint if the target or force constant is not defined.
# Example: wall restraints only used during the attach phase.
try:
target = restraint.phase[phase]["targets"][window]
force_constant = (
restraint.phase[phase]["force_constants"][window]
* self.k_factor
)
except TypeError:
continue
# Convert list to comma-separated string
atom_index = self._get_atom_indices(restraint)
atom_string = ",".join(map(str, atom_index))
# Convert units to the correct type for PLUMED module
if restraint.restraint_type == "distance":
target = target.to(pint_unit.angstrom)
force_constant = force_constant.to(
self.output_units["energy"] / self.output_units["length"] ** 2
)
elif (
restraint.restraint_type == "angle"
or restraint.restraint_type == "torsion"
):
target = target.to(pint_unit.radians)
force_constant = force_constant.to(
self.output_units["energy"] / pint_unit.radians ** 2
)
# Determine bias type for this restraint
bias_type = get_bias_potential_type(restraint, phase, window)
# Append cv strings to lists
# The code below prevents duplicate cv definition.
# While not necessary, it makes the plumed file cleaner.
if not get_key(cv_dict, atom_string):
cv_key = f"c{cv_index}"
cv_dict[cv_key] = atom_string
cv_lines.append(
f"{cv_key}: {restraint.restraint_type.upper()} ATOMS={atom_string} NOPBC\n"
)
bias_lines.append(
f"{bias_type.upper()} ARG={cv_key} AT={target.magnitude:.4f} KAPPA="
f"{force_constant.magnitude:.2f}\n"
)
else:
cv_key = get_key(cv_dict, atom_string)[0]
bias_lines.append(
f"{bias_type.upper()} ARG={cv_key} AT={target.magnitude:.4f} KAPPA="
f"{force_constant.magnitude:.2f}\n"
)
# Increment cv index
cv_index += 1
# Write collective variables to file
self._write_colvar_to_file(windows, cv_lines, bias_lines)
def _write_colvar_to_file(self, window, cv_list, bias_list):
with open(os.path.join(self.path, window, self.file_name), "a") as file:
if len(self.group_atoms) != 0:
file.write("# Centroid groups\n")
for key, value in self.group_atoms.items():
file.write(f"{key}: COM ATOMS={value}\n")
file.write("# Collective variables\n")
for line in cv_list:
file.write(line)
file.write("# Bias potentials\n")
for line in bias_list:
file.write(line)
def _get_atom_indices(self, restraint):
# Check atom index setting
index_shift = 0
if not restraint.amber_index:
index_shift = 1
logger.debug("Atom indices starts from 0 --> shifting indices by 1.")
# Collect DAT atom indices
atom_index = []
if not restraint.group1:
atom_index.append(restraint.index1[0] + index_shift)
else:
igr1 = ""
for index in restraint.index1:
igr1 += "{},".format(index + index_shift)
if not get_key(self.group_atoms, igr1):
self.group_atoms[f"g{self.group_index}"] = igr1
self.group_index += 1
atom_index.append(get_key(self.group_atoms, igr1)[0])
if not restraint.group2:
atom_index.append(restraint.index2[0] + index_shift)
else:
igr2 = ""
for index in restraint.index2:
igr2 += "{},".format(index + index_shift)
if not get_key(self.group_atoms, igr2):
self.group_atoms[f"g{self.group_index}"] = igr2
self.group_index += 1
atom_index.append(get_key(self.group_atoms, igr2)[0])
if restraint.index3 and not restraint.group3:
atom_index.append(restraint.index3[0] + index_shift)
elif restraint.group3:
igr3 = ""
for index in restraint.index3:
igr3 += "{},".format(index + index_shift)
if not get_key(self.group_atoms, igr3):
self.group_atoms[f"g{self.group_index}"] = igr3
self.group_index += 1
atom_index.append(get_key(self.group_atoms, igr3)[0])
if restraint.index4 and not restraint.group4:
atom_index.append(restraint.index4[0] + index_shift)
elif restraint.group4:
igr4 = ""
for index in restraint.index4:
igr4 += "{},".format(index + index_shift)
if not get_key(self.group_atoms, igr4):
self.group_atoms[f"g{self.group_index}"] = igr4
self.group_index += 1
atom_index.append(get_key(self.group_atoms, igr4)[0])
return atom_index
def add_dummy_atom_restraints(self, structure, window, path=None):
"""
Add positional restraints on dummy atoms to the restraint files.
Parameters
----------
structure: os.PathLike or :class:`parmed.Structure`
The reference structure that is used to determine the absolute coordinates of the dummy atoms.
window: str
APR window where the structure is stored for extracting the dummy atom positions.
path: os.PathLike, optional, default=None
Path of the restraint file. If set to ``None`` (default) self.path will be used.
"""
# Load structure file
if isinstance(structure, str):
structure = return_parmed_structure(structure)
elif isinstance(structure, ParmedStructureClass):
pass
else:
raise Exception(
"add_dummy_atoms_to_file does not support the type associated with structure: "
+ str(type(structure))
)
# Extract dummy atoms
dummy_atoms = extract_dummy_atoms(structure, serial=True)
# Write dummy atom info to plumed file
if path is not None:
restraint_file = os.path.join(path, window, self.file_name)
else:
restraint_file = os.path.join(self.path, window, self.file_name)
if os.path.isfile(restraint_file):
with open(restraint_file, "a") as file:
self._write_dummy_to_file(file, dummy_atoms)
else:
raise Exception(f"ERROR: '{restraint_file}' file does not exists!")
@staticmethod
def _write_dummy_to_file(file, dummy_atoms, kpos=100.0):
"""
Append to the "plumed.dat" file the dummy atoms colvar definition and position restraints
Parameters
----------
file : file object
Handle of the Plumed restraint file, opened in append mode.
dummy_atoms : dict
Dummy atom information as returned by ``extract_dummy_atoms``.
kpos : float, optional, default=100.0
Spring constant used for the positional restraints on the dummy atoms.
"""
@property
def symbolDimensions(self) -> Tuple[str, ...]:
"""Set of fields that determine database table name."""
return self.__symbolDimensions
@symbolDimensions.setter
def symbolDimensions(self, value: Tuple[str, ...]):
self.__symbolDimensions = value
self._property_changed('symbolDimensions')
@property
def priceMethod(self) -> str:
"""Method used to calculate net price."""
return self.__priceMethod
@priceMethod.setter
def priceMethod(self, value: str):
self.__priceMethod = value
self._property_changed('priceMethod')
@property
def quotingStyle(self) -> str:
return self.__quotingStyle
@quotingStyle.setter
def quotingStyle(self, value: str):
self.__quotingStyle = value
self._property_changed('quotingStyle')
@property
def optionTypeSDR(self) -> str:
"""An indication of the type of the option."""
return self.__optionTypeSDR
@optionTypeSDR.setter
def optionTypeSDR(self, value: str):
self.__optionTypeSDR = value
self._property_changed('optionTypeSDR')
@property
def scenarioGroupId(self) -> str:
"""Marquee unique scenario group identifier"""
return self.__scenarioGroupId
@scenarioGroupId.setter
def scenarioGroupId(self, value: str):
self.__scenarioGroupId = value
self._property_changed('scenarioGroupId')
@property
def errorMessage(self) -> str:
"""Error message to correspond to error in factor field."""
return self.__errorMessage
@errorMessage.setter
def errorMessage(self, value: str):
self.__errorMessage = value
self._property_changed('errorMessage')
@property
def averageImpliedVariance(self) -> float:
"""Average variance of an asset implied by observations of market prices."""
return self.__averageImpliedVariance
@averageImpliedVariance.setter
def averageImpliedVariance(self, value: float):
self.__averageImpliedVariance = value
self._property_changed('averageImpliedVariance')
@property
def avgTradeRateDescription(self) -> str:
"""Description of the Stock's Average Trading Rate on the particular date."""
return self.__avgTradeRateDescription
@avgTradeRateDescription.setter
def avgTradeRateDescription(self, value: str):
self.__avgTradeRateDescription = value
self._property_changed('avgTradeRateDescription')
@property
def midPrice(self) -> float:
"""The mid price."""
return self.__midPrice
@midPrice.setter
def midPrice(self, value: float):
self.__midPrice = value
self._property_changed('midPrice')
@property
def fraction(self) -> float:
"""Fraction."""
return self.__fraction
@fraction.setter
def fraction(self, value: float):
self.__fraction = value
self._property_changed('fraction')
@property
def stsCreditMarket(self) -> str:
"""Credit risk market."""
return self.__stsCreditMarket
@stsCreditMarket.setter
def stsCreditMarket(self, value: str):
self.__stsCreditMarket = value
self._property_changed('stsCreditMarket')
@property
def assetCountShort(self) -> float:
"""Number of assets in a portfolio with short exposure."""
return self.__assetCountShort
@assetCountShort.setter
def assetCountShort(self, value: float):
self.__assetCountShort = value
self._property_changed('assetCountShort')
@property
def stsEmDm(self) -> str:
"""Emerging or developed market classification."""
return self.__stsEmDm
@stsEmDm.setter
def stsEmDm(self, value: str):
self.__stsEmDm = value
self._property_changed('stsEmDm')
@property
def requiredCollateralValue(self) -> float:
"""Amount of collateral required to cover contractual obligation."""
return self.__requiredCollateralValue
@requiredCollateralValue.setter
def requiredCollateralValue(self, value: float):
self.__requiredCollateralValue = value
self._property_changed('requiredCollateralValue')
@property
def tcmCostHorizon2Day(self) -> float:
"""TCM cost with a 2 day time horizon."""
return self.__tcmCostHorizon2Day
@tcmCostHorizon2Day.setter
def tcmCostHorizon2Day(self, value: float):
self.__tcmCostHorizon2Day = value
self._property_changed('tcmCostHorizon2Day')
@property
def pendingLoanCount(self) -> float:
"""The number of pending loans that exist on a given date."""
return self.__pendingLoanCount
@pendingLoanCount.setter
def pendingLoanCount(self, value: float):
self.__pendingLoanCount = value
self._property_changed('pendingLoanCount')
@property
def queueInLots(self) -> float:
"""The Queue size in Lots (if applicable) of the stock on the particular date."""
return self.__queueInLots
@queueInLots.setter
def queueInLots(self, value: float):
self.__queueInLots = value
self._property_changed('queueInLots')
@property
def priceRangeInTicksDescription(self) -> str:
"""Description of the Stock's Price Range in Ticks on the particular date."""
return self.__priceRangeInTicksDescription
@priceRangeInTicksDescription.setter
def priceRangeInTicksDescription(self, value: str):
self.__priceRangeInTicksDescription = value
self._property_changed('priceRangeInTicksDescription')
@property
def date(self) -> datetime.date:
"""ISO 8601 formatted date."""
return self.__date
@date.setter
def date(self, value: datetime.date):
self.__date = value
self._property_changed('date')
@property
def tenderOfferExpirationDate(self) -> str:
"""Expiration date of the tender offer."""
return self.__tenderOfferExpirationDate
@tenderOfferExpirationDate.setter
def tenderOfferExpirationDate(self, value: str):
self.__tenderOfferExpirationDate = value
self._property_changed('tenderOfferExpirationDate')
@property
def optionExpirationFrequency(self) -> str:
"""Option Expiration Frequency provided by Participant (e.g., Daily, Monthly)."""
return self.__optionExpirationFrequency
@optionExpirationFrequency.setter
def optionExpirationFrequency(self, value: str):
self.__optionExpirationFrequency = value
self._property_changed('optionExpirationFrequency')
@property
def highUnadjusted(self) -> float:
"""Unadjusted high level of an asset based on official exchange fixing or calculation agent marked level."""
return self.__highUnadjusted
@highUnadjusted.setter
def highUnadjusted(self, value: float):
self.__highUnadjusted = value
self._property_changed('highUnadjusted')
@property
def sourceCategory(self) -> str:
"""Source category of event."""
return self.__sourceCategory
@sourceCategory.setter
def sourceCategory(self, value: str):
self.__sourceCategory = value
self._property_changed('sourceCategory')
@property
def volumeUnadjusted(self) -> float:
"""Unadjusted volume traded."""
return self.__volumeUnadjusted
@volumeUnadjusted.setter
def volumeUnadjusted(self, value: float):
self.__volumeUnadjusted = value
self._property_changed('volumeUnadjusted')
@property
def avgTradeRateLabel(self):
"""Label of the Stock's Average Trading Rate on the particular date."""
return self.__avgTradeRateLabel
@avgTradeRateLabel.setter
def avgTradeRateLabel(self, value):
self.__avgTradeRateLabel = value
self._property_changed('avgTradeRateLabel')
@property
def tcmCostParticipationRate5Pct(self) -> float:
"""TCM cost with a 5 percent participation rate."""
return self.__tcmCostParticipationRate5Pct
@tcmCostParticipationRate5Pct.setter
def tcmCostParticipationRate5Pct(self, value: float):
self.__tcmCostParticipationRate5Pct = value
self._property_changed('tcmCostParticipationRate5Pct')
@property
def isActive(self) -> bool:
"""Whether this entry is active."""
return self.__isActive
@isActive.setter
def isActive(self, value: bool):
self.__isActive = value
self._property_changed('isActive')
@property
def growthScore(self) -> float:
"""Growth percentile relative to Americas coverage universe (a higher score means faster growth)."""
return self.__growthScore
@growthScore.setter
def growthScore(self, value: float):
self.__growthScore = value
self._property_changed('growthScore')
@property
def bufferThreshold(self) -> float:
"""The required buffer between holdings and on loan quantity for an asset."""
return self.__bufferThreshold
@bufferThreshold.setter
def bufferThreshold(self, value: float):
self.__bufferThreshold = value
self._property_changed('bufferThreshold')
@property
def encodedStats(self) -> str:
"""Asset stats object in json format."""
return self.__encodedStats
@encodedStats.setter
def encodedStats(self, value: str):
self.__encodedStats = value
self._property_changed('encodedStats')
@property
def priceFormingContinuationData(self) -> str:
"""An indication of whether an SB swap transaction is a post-execution event that affects the price of the swap transaction, e.g. terminations, assignments, novations, exchanges, transfers, amendments, conveyances or extinguishing of rights that change the price of the SB swap."""
return self.__priceFormingContinuationData
@priceFormingContinuationData.setter
def priceFormingContinuationData(self, value: str):
self.__priceFormingContinuationData = value
self._property_changed('priceFormingContinuationData')
@property
def adjustedShortInterest(self) -> float:
"""Adjusted Short Interest rate."""
return self.__adjustedShortInterest
@adjustedShortInterest.setter
def adjustedShortInterest(self, value: float):
self.__adjustedShortInterest = value
self._property_changed('adjustedShortInterest')
@property
def askSize(self) -> float:
"""The number of shares, lots, or contracts willing to sell at the Ask price."""
return self.__askSize
@askSize.setter
def askSize(self, value: float):
self.__askSize = value
self._property_changed('askSize')
@property
def mdapiType(self) -> str:
"""The MDAPI data type - DEPRECATED."""
return self.__mdapiType
@mdapiType.setter
def mdapiType(self, value: str):
self.__mdapiType = value
self._property_changed('mdapiType')
@property
def group(self) -> str:
"""Region or sector following the MSCI Global Industry Classification Standard (GICS)."""
return self.__group
@group.setter
def group(self, value: str):
self.__group = value
self._property_changed('group')
@property
def estimatedSpread(self) -> float:
"""Average bid-ask quoted spread of the stock (bps) over the execution horizon (1 day)."""
return self.__estimatedSpread
@estimatedSpread.setter
def estimatedSpread(self, value: float):
self.__estimatedSpread = value
self._property_changed('estimatedSpread')
@property
def resource(self) -> str:
"""The event resource. For example: Asset"""
return self.__resource
@resource.setter
def resource(self, value: str):
self.__resource = value
self._property_changed('resource')
@property
def created(self) -> datetime.datetime:
"""Created time."""
return self.__created
@created.setter
def created(self, value: datetime.datetime):
self.__created = value
self._property_changed('created')
@property
def averageRealizedVolatility(self) -> float:
"""Average volatility of an asset realized by observations of market prices."""
return self.__averageRealizedVolatility
@averageRealizedVolatility.setter
def averageRealizedVolatility(self, value: float):
self.__averageRealizedVolatility = value
self._property_changed('averageRealizedVolatility')
@property
def tcmCost(self) -> float:
"""Pretrade computation of trading out cost."""
return self.__tcmCost
@tcmCost.setter
def tcmCost(self, value: float):
self.__tcmCost = value
self._property_changed('tcmCost')
@property
def sustainJapan(self) -> bool:
"""True if the stock is on the SUSTAIN Japan list as of the corresponding date. False if the stock is removed from the SUSTAIN Japan list on the corresponding date."""
return self.__sustainJapan
@sustainJapan.setter
def sustainJapan(self, value: bool):
self.__sustainJapan = value
self._property_changed('sustainJapan')
@property
def navSpread(self) -> float:
"""Net asset value spread. Quoted (running) spread (mid) of the underlying basket of single name CDS. (Theoretical Index value). In basis points."""
return self.__navSpread
@navSpread.setter
def navSpread(self, value: float):
self.__navSpread = value
self._property_changed('navSpread')
@property
def bidPrice(self) -> float:
"""Latest Bid Price (price willing to buy)."""
return self.__bidPrice
@bidPrice.setter
def bidPrice(self, value: float):
self.__bidPrice = value
self._property_changed('bidPrice')
@property
def dollarTotalReturn(self) -> float:
"""The dollar total return of an instrument."""
return self.__dollarTotalReturn
@dollarTotalReturn.setter
def dollarTotalReturn(self, value: float):
self.__dollarTotalReturn = value
self._property_changed('dollarTotalReturn')
@property
def blockUnit(self) -> str:
"""Unit of measure used for Block trades."""
return self.__blockUnit
@blockUnit.setter
def blockUnit(self, value: str):
self.__blockUnit = value
self._property_changed('blockUnit')
@property
def hedgeTrackingError(self) -> float:
"""Standard deviation of the difference in the portfolio and benchmark returns over time."""
return self.__hedgeTrackingError
@hedgeTrackingError.setter
def hedgeTrackingError(self, value: float):
self.__hedgeTrackingError = value
self._property_changed('hedgeTrackingError')
@property
def marketCapCategory(self) -> str:
"""Category of market capitalizations a fund is focused on from an investment perspective. Same view permissions as the asset."""
return self.__marketCapCategory
@marketCapCategory.setter
def marketCapCategory(self, value: str):
self.__marketCapCategory = value
self._property_changed('marketCapCategory')
@property
def historicalVolume(self) -> float:
"""One month rolling average."""
return self.__historicalVolume
@historicalVolume.setter
def historicalVolume(self, value: float):
self.__historicalVolume = value
self._property_changed('historicalVolume')
@property
def esNumericPercentile(self) -> float:
"""Sector relative percentile based on E&S numeric score."""
| |
# Source repository: occamLab/invisible-map-generation
"""Some helpful functions for visualizing and analyzing graphs.
"""
from enum import Enum
from typing import Union, List, Dict, Tuple, Any
import g2o
from matplotlib import pyplot as plt
from matplotlib import cm
import numpy as np
from g2o import SE3Quat, EdgeProjectPSI2UV, Quaternion
from scipy.spatial.transform import Rotation as Rot
import shapely.geometry
from shapely.geometry import LineString
from map_processing.graph_vertex_edge_classes import VertexType
# The camera axes used to get tag measurements are flipped relative to the phone frame used for odom measurements
camera_to_odom_transform = np.array([
[1, 0, 0, 0],
[0, -1, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, 1]
])
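# Quick sanity check (illustrative, not part of the original module): the flip above negates the
# y and z components of a camera-frame point while leaving x and the homogeneous coordinate
# untouched.
# >>> camera_to_odom_transform @ np.array([1.0, 2.0, 3.0, 1.0])
# array([ 1., -2., -3.,  1.])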
default_dummy_weights = np.array([-1, 1e2, -1])
assumed_focal_length = 1464
# The ground truth tags for the 6-17-21 OCCAM Room
s = np.sin(np.pi / 4)
c = np.cos(np.pi / 4)
occam_room_tags = np.asarray([SE3Quat([0, 63.25 * 0.0254, 0, 0, 0, 0, 1]),
SE3Quat([269 * 0.0254, 48.5 * 0.0254, -31.25 * 0.0254, 0, 0, 0, 1]),
SE3Quat([350 * 0.0254, 58.25 * 0.0254, 86.25 * 0.0254, 0, c, 0, -s]),
SE3Quat([345.5 * 0.0254, 58 * 0.0254, 357.75 * 0.0254, 0, 1, 0, 0]),
SE3Quat([240 * 0.0254, 86 * 0.0254, 393 * 0.0254, 0, 1, 0, 0]),
SE3Quat([104 * 0.0254, 31.75 * 0.0254, 393 * 0.0254, 0, 1, 0, 0]),
SE3Quat([-76.75 * 0.0254, 56.5 * 0.0254, 316.75 * 0.0254, 0, c, 0, s]),
SE3Quat([-76.75 * 0.0254, 54 * 0.0254, 75 * 0.0254, 0, c, 0, s])])
class MapInfo:
"""Container for identifying information for a graph (useful for caching process)
Attributes:
map_name (str): Specifies the child of the "maps" database reference to upload the optimized
graph to; also passed as the map_name argument to the cache_map method
map_json_blob_name (str): String corresponding to the bucket blob name of the json
map_dct (dict): Dictionary containing the graph parsed from json
uid (str): Optional identifier associated with the map
"""
def __init__(self, map_name: str, map_json_name: str, map_dct: Dict = None, uid: str = None):
self.map_name: str = str(map_name)
self.map_json_blob_name: str = str(map_json_name)
self.map_dct: Union[dict, str] = dict(map_dct) if map_dct is not None else {}
self.uid = uid
def __hash__(self):
return self.map_json_blob_name.__hash__()
def __repr__(self):
return self.map_name
def se3_quat_average(transforms):
"""TODO: documentation
"""
translation_average = sum([t.translation() / len(transforms) for t in transforms])
epsilons = np.ones(len(transforms), )
converged = False
quat_average = None
while not converged:
quat_sum = sum(np.array([t.orientation().x(), t.orientation().y(), t.orientation().z(), t.orientation().w()]) \
* epsilons[idx] for idx, t in enumerate(transforms))
quat_average = quat_sum / np.linalg.norm(quat_sum)
same_epsilon = [np.linalg.norm(epsilons[idx] * np.array([t.orientation().x(), t.orientation().y(),
t.orientation().z(), t.orientation().w()]) - \
quat_average) for idx, t in enumerate(transforms)]
swap_epsilon = [np.linalg.norm(-epsilons[idx] * np.array([t.orientation().x(), t.orientation().y(),
t.orientation().z(), t.orientation().w()]) - \
quat_average) for idx, t in enumerate(transforms)]
change_mask = np.greater(same_epsilon, swap_epsilon)
epsilons[change_mask] = -epsilons[change_mask]
converged = not np.any(change_mask)
average_as_quat = Quaternion(quat_average[3], quat_average[0], quat_average[1], quat_average[2])
return SE3Quat(average_as_quat, translation_average)
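# Minimal usage sketch (example values are assumptions, not from the original code): averaging two
# poses with identical rotations returns the midpoint translation and the shared rotation.
# >>> a = SE3Quat([0, 0, 0, 0, 0, 0, 1])
# >>> b = SE3Quat([2, 0, 0, 0, 0, 0, 1])
# >>> se3_quat_average([a, b]).translation()
# array([1., 0., 0.])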
def optimizer_to_map(vertices, optimizer: g2o.SparseOptimizer, is_sparse_bundle_adjustment=False) -> \
Dict[str, Union[List, np.ndarray]]:
"""Convert a :class: g2o.SparseOptimizer to a dictionary containing locations of the phone, tags, and waypoints.
Args:
vertices: A dictionary of vertices. This is used to lookup the type of vertex pulled from the optimizer.
optimizer: a :class: g2o.SparseOptimizer containing a map.
is_sparse_bundle_adjustment: True if the optimizer is based on sparse bundle adjustment and False otherwise.
Returns:
A dictionary with fields 'locations', 'tags', and 'waypoints'. The 'locations' key covers a (n, 9) array
containing x, y, z, qx, qy, qz, qw locations of the phone as well as the vertex uid and pose id at n points.
The 'tags' and 'waypoints' keys cover the locations of the tags and waypoints as (n, 8) arrays (pose plus vertex uid).
"""
locations = np.reshape([], [0, 9])
tagpoints = np.reshape([], [0, 3])
tags = np.reshape([], [0, 8])
waypoints = np.reshape([], [0, 8])
waypoint_metadata = []
exaggerate_tag_corners = True
for i in optimizer.vertices():
mode = vertices[i].mode
if mode == VertexType.TAGPOINT:
tag_vert = find_connected_tag_vert(optimizer, optimizer.vertex(i))
if tag_vert is None:
# TODO: double-check that the right way to handle this case is to continue
continue
location = optimizer.vertex(i).estimate()
if exaggerate_tag_corners:
location = location * np.array([10, 10, 1])
tagpoints = np.vstack((tagpoints, tag_vert.estimate().inverse() * location))
else:
location = optimizer.vertex(i).estimate().translation()
rotation = optimizer.vertex(i).estimate().rotation().coeffs()
if mode == VertexType.ODOMETRY:
pose = np.concatenate([location, rotation, [i], [vertices[i].meta_data['pose_id']]])
locations = np.vstack([locations, pose])
elif mode == VertexType.TAG:
pose = np.concatenate([location, rotation, [i]])
if is_sparse_bundle_adjustment:
# adjusts tag based on the position of the tag center
pose[:-1] = (SE3Quat([0, 0, 1, 0, 0, 0, 1]).inverse() * SE3Quat(vertices[i].estimate)).to_vector()
if 'tag_id' in vertices[i].meta_data:
pose[-1] = vertices[i].meta_data['tag_id']
tags = np.vstack([tags, pose])
elif mode == VertexType.WAYPOINT:
pose = np.concatenate([location, rotation, [i]])
waypoints = np.vstack([waypoints, pose])
waypoint_metadata.append(vertices[i].meta_data)
# convert to array for sorting
locations = np.array(locations)
locations = locations[locations[:, -1].argsort()]
return {'locations': locations, 'tags': np.array(tags), 'tagpoints': tagpoints,
'waypoints': [waypoint_metadata, np.array(waypoints)]}
def optimizer_to_map_chi2(graph, optimizer: g2o.SparseOptimizer, is_sparse_bundle_adjustment=False) -> \
Dict[str, Union[List, np.ndarray]]:
"""Convert a :class: g2o.SparseOptimizer to a dictionary containing locations of the phone, tags, waypoints, and
per-odometry edge chi2 information.
This function works by calling `optimizer_to_map` and adding a new entry that is a vector of the per-odometry edge
chi2 information as calculated by the `map_odom_to_adj_chi2` method of the `Graph` class.
Args:
graph (Graph): A graph instance whose vertices attribute is passed as the first argument to `optimizer_to_map`
and whose `map_odom_to_adj_chi2` method is used.
optimizer: a :class: g2o.SparseOptimizer containing a map, which is passed as the second argument to
`optimizer_to_map`.
is_sparse_bundle_adjustment: True if the optimizer is based on sparse bundle adjustment and False otherwise;
passed as the `is_sparse_bundle_adjustment` keyword argument to `optimizer_to_map`.
Returns:
A dictionary with fields 'locations', 'tags', 'waypoints', and 'locationsAdjChi2'. The 'locations' key covers a
(n, 9) array containing x, y, z, qx, qy, qz, qw locations of the phone as well as the vertex uid and pose id at n points.
The 'tags' and 'waypoints' keys cover the locations of the tags and waypoints as (n, 8) arrays. Associated
with each odometry node is a chi2 calculated from the `map_odom_to_adj_chi2` method of the `Graph` class, which
is stored in the vector in the locationsAdjChi2 vector.
"""
ret_map = optimizer_to_map(graph.vertices, optimizer, is_sparse_bundle_adjustment=is_sparse_bundle_adjustment)
locations_shape = np.shape(ret_map["locations"])
locations_adj_chi2 = np.zeros([locations_shape[0], 1])
visible_tags_count = np.zeros([locations_shape[0], 1])
for i, odom_node_vec in enumerate(ret_map["locations"]):
uid = round(odom_node_vec[7]) # UID integer is stored as a floating point number, so cast it to an integer
locations_adj_chi2[i], visible_tags_count[i] = graph.map_odom_to_adj_chi2(uid)
ret_map["locationsAdjChi2"] = locations_adj_chi2
ret_map["visibleTagsCount"] = visible_tags_count
return ret_map
def find_connected_tag_vert(optimizer, location_vert):
# TODO: it would be nice if we didn't have to scan the entire graph
for edge in optimizer.edges():
if type(edge) == EdgeProjectPSI2UV:
if edge.vertex(0).id() == location_vert.id():
return edge.vertex(2)
return None
def measurement_to_matrix(measurement):
transformation = np.eye(4)
transformation[:3, 3] = measurement[:3]
transformation[:3, :3] = Rot.from_quat(measurement[3:7]).as_matrix()
return transformation
def pose_to_isometry(pose):
"""Convert a pose vector to a :class: g2o.Isometry3d instance.
Args:
pose: A 7 element 1-d numpy array encoding x, y, z, qx, qy, qz, and qw respectively.
Returns:
A :class: g2o.Isometry3d instance encoding the same information as the input pose.
"""
return g2o.Isometry3d(g2o.Quaternion(*np.roll(pose[3:7], 1)), pose[:3])
def pose_to_se3quat(pose):
"""Convert a pose vector to a :class: g2o.Isometry3d instance.
Args:
pose: A 7 element 1-d numpy array encoding x, y, z, qx, qy, qz, and qw respectively.
Returns:
A :class: g2o.Isometry3d instance encoding the same information as the input pose.
"""
return g2o.SE3Quat(g2o.Quaternion(*np.roll(pose[3:7], 1)), pose[:3])
def isometry_to_pose(isometry):
"""Convert a :class: g2o.Isometry3d to a vector containing a pose.
Args:
isometry: A :class: g2o.Isometry3d instance.
Returns:
A 7 element 1-d numpy array encoding x, y, z, qx, qy, qz, and qw respectively.
"""
return np.concatenate(
[isometry.translation(), isometry.rotation().coeffs()])
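# Round-trip sketch (illustrative, values are assumptions): pose_to_isometry and isometry_to_pose
# are inverses for a pose vector ordered x, y, z, qx, qy, qz, qw.
# >>> p = np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0, 1.0])
# >>> np.allclose(isometry_to_pose(pose_to_isometry(p)), p)
# True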
def global_yaw_effect_basis(rotation, gravity_axis='z'):
"""Form a basis which describes the effect of a change in global yaw on a local measurement's qx, qy, and qz.
Since the accelerometer measures gravitational acceleration, it can accurately measure the global z-axis but its
measurements of the orthogonal axes are less reliable.
Args:
rotation: A :class: scipy.spatial.transform.Rotation encoding a local rotation.
gravity_axis: A character specifying the gravity axis (e.g., 'z')
Returns:
A 3x3 numpy array where the columns are the new basis.
"""
rotation1 = Rot.from_euler(gravity_axis, 0.05) * rotation
change = rotation1.as_quat()[:3] - rotation.as_quat()[:3]
return np.linalg.svd(change[:, np.newaxis])[0]
def locations_from_transforms(locations):
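"""Invert each 7-element pose (x, y, z, qx, qy, qz, qw) in the first seven columns of
``locations`` in place and return the modified array."""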
for i in range(locations.shape[0]):
locations[i, :7] = SE3Quat(locations[i, :7]).inverse().to_vector()
return locations
def plot_metrics(sweep: np.ndarray, metrics: np.ndarray, log_sweep: bool = False, log_metric: bool = False):
filtered_metrics = metrics > -1
sweep_plot = np.log(sweep) if log_sweep else sweep
to_plot = np.log(metrics) if log_metric else metrics
fig, ax = plt.subplots()
# plot the (optionally log-scaled) metric against the sweep values
ax.plot(sweep_plot, to_plot)
return fig, ax
# ~/dev/py/fieldz/msgImpl.py
# import sys # for debugging
from wireops.enum import FieldTypes, PrimTypes
from wireops.raw import(length_as_varint, field_hdr_len, read_field_hdr,
write_raw_varint, read_raw_varint,
write_field_hdr)
from wireops.typed import T_GET_FUNCS, T_LEN_FUNCS, T_PUT_FUNCS
from fieldz import FieldzError
from fieldz.enum import Quants
from fieldz.field_impl import make_field_class
__all__ = ['make_msg_class', 'make_field_class', 'write', 'impl_len', ]
# SERIALIZATION METHODS ---------------------------------------------
# This interface should be compatible with registry {put,get,len}Func but
# is NOT. SHOULD REPLACE buf, pos WITH chan IN ALL PARAMETER LISTS
def impl_len(msg, nnn):
"""
msg is a reference to an instance of the MsgImpl class, n is its
field number. Returns the int length of the serialized object,
including the lenght of the field header.
"""
return msg.wire_len(nnn)
def _check_position(chan, end):
if chan.position > end:
err_msg = "read beyond end of buffer: position := %d, end is %d" % (
chan.position, end)
raise RuntimeError(err_msg)
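# Wire-format summary (descriptive comment, added for clarity): MsgImpl.write below emits, in
# order, (1) a field header combining the message's field number or regID with
# PrimTypes.LEN_PLUS, (2) the unprefixed body length as a raw varint, and (3) each field in
# declaration order, recursing for nested message types (field_type > 23). MsgImpl.read consumes
# the same three pieces and uses _check_position to detect reads past the declared end.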
# -------------------------------------------------------------------
# CODE FRAGMENTS: METHODS USED AS COMPONENTS IN BUILDING CLASSES
# -------------------------------------------------------------------
# pylint: disable=unused-argument
def write(self):
raise NotImplementedError
def my_getter(self):
raise NotImplementedError
def my_wire_len(self):
print("DEBUG: myWireLen invoked")
raise NotImplementedError
def my_p_wire_len(self, nnn): # field number for nested msg, regID otherwise
raise NotImplementedError
# specific to messages ----------------------------------------------
def my_enums(self):
#pylint: disable=protected-access
return self._enums
def my_msgs(self):
#pylint: disable=protected-access
return self._msgs
def my_field_classes(self):
#pylint: disable=protected-access
return self._field_classes
# specific to fields ------------------------------------------------
# FOR A GIVEN FIELD, THESE ARE CONSTANTS ASSIGNED BY make_field_class
#
#
# def myFType(cls): return cls._fType
#
#
# def myQuantifier(cls): return cls._quantifier
#
#
# def myFieldNbr(cls): return cls._fieldNbr
#
#
# def myDefault(cls): return cls._default
#
# these get and set the value attribute of the field instance; they
# have nothing to do with de/serialization to and from the channel
#
#
# def myValueGetter(self): return self._value
# XXX TYPE-SPECIFIC VALIDATION, COERCION:
#
#
# def myValueSetter(self, value): self._value = value # GEEP
# -------------------------------------------------------------------
# MESSAGE CLASS
# -------------------------------------------------------------------
# WAS OF TYPE 'type' 2016-08-02
class MsgImpl(object):
"""
An abstract class intended to serve as parent to automatically
generated classes whose purpose will be to ease user handling
of data being sent or received across the wire.
"""
# DISABLE __slots__ until better understood
# __slots__ = ['_field_classes', # list of field instances
# # '_fields_by_name',
# '_enums', # nested enums
# '_msgs', # nested messages
# ]
def __init__(self, mname, field_classes=None, enums=None, msgs=None):
self._mname = mname
self._field_classes = field_classes
self._enums = enums
self._msgs = msgs
self._parent_spec = None
# EXPERIMENT 2018-01-05
@property
def mname(self):
return self._mname
# END EXPERIMENT
def __eq__(self, other):
if other is None:
return False
if self is other:
return True
if self._mname != other.mname:
return False
# print "MESSAGE NAMES THE SAME" # DEBUG
# -- compare fields -------------------------------
if self._field_classes is None or other.field_classes is None:
return False
# print "SAME NUMBER OF FIELDS" # DEBUG
if len(self._field_classes) != len(other.field_classes):
return False
for i in range(len(self._field_classes)):
if self._field_classes[i] != other.field_classes[i]:
# DEBUG
print("MESSAGE FIELDS %d DIFFER" % i)
# END
return False
# print "FIELDS ARE THE SAME" # DEBUG
# -- compare nested enums -------------------------
if self._enums is None or other.enums is None:
return False
if len(self._enums) != len(other.enums):
return False
for i in range(len(self._enums)):
if self._enums[i] != other.enums[i]:
return False
# -- compare nested msgs --------------------------
if self._msgs is None or other.msgs is None:
return False
if len(self._msgs) != len(other.msgs):
return False
for i in range(len(self._msgs)):
if self._msgs[i] != other.msgs[i]:
return False
return True
def __len__(self):
return len(self._field_classes)
def __getitem__(self, nnn):
# 2016-08-02, same fix
# return self._fields[n]
return self._field_classes[nnn]
# -- INSTANCE SERIALIZATION -------------------------------------
# INSTANCE PUT ----------------------------------------
def write_stand_alone(self, chan):
"""
Write the message stand-alone, as the topmost message on the
channel. Returns the message index as a convenience in testing.
"""
mname = self._mname
ndx = self._parent_spec.msg_name_index(mname)
# DEBUG
print("WRITE_STAND_ALONE: MSG %s INDEX IS %d" % (mname, ndx))
# END
self.write(chan, ndx)
return ndx
def write(self, chan, nnn):
"""
n is the msg's field number OR regID
"""
write_field_hdr(
chan,
nnn,
PrimTypes.LEN_PLUS) # write the field header
msg_len = self._wire_len() # then the unprefixed length
write_raw_varint(chan, msg_len)
# XXX DEBUG
print("ENTERING MsgImpl.write FIELD NBR " +
"%u, MSG LEN IS %u; AFTER WRITING HDR OFFSET %u" % (
nnn, msg_len, chan.position))
# XXX This only makes sense for simple messages all of whose
# fields are required and so have only a single instance
for field in self._field_classes: # instances with a value attr
# CLASS-LEVEL SLOTS are '_name', '_fType', '_quantifier',
# '_fieldNbr', '_default',]
# INSTANCE-LEVEL SLOT is '_value'
#pylint: disable=protected-access
f_name = field._name
f_nbr = field.field_nbr
f_quant = field.quantifier # NEXT HURDLE
field_type = field.field_type
value = field.value
# default = field.default
# pylint: disable=no-member
if f_quant == Quants.REQUIRED or f_quant == Quants.OPTIONAL:
if field_type > 23:
# DEBUG
reg = self.msg_spec.reg
print("RECURSING TO WRITE FIELD %u TYPE %s" % (
f_nbr, reg.reg_id2name(field_type)))
# END
value.write(chan, f_nbr)
else:
# DEBUG
display_val = value
if field_type == FieldTypes.L_STRING and len(
display_val) > 16:
display_val = display_val[:16] + '...'
print("WRITING FIELD %u TYPE %u VALUE %s" % (
f_nbr, field_type, display_val))
# END
T_PUT_FUNCS[field_type](chan, value, f_nbr)
elif f_quant == Quants.PLUS or f_quant == Quants.STAR:
v_list = value
for varint_ in v_list:
# WORKING HERE
if field_type > 23:
# DEBUG
reg = self.msg_spec.reg
print("RECURSING TO WRITE FIELD %u TYPE %s" % (
f_nbr, reg.reg_id2name(field_type)))
# END
# this function recursing
varint_.write(chan, f_nbr)
else:
T_PUT_FUNCS[field_type](chan, varint_, f_nbr)
else:
raise RuntimeError(
"field '%s' has unknown quantifier '%s'" % (
f_name, f_quant)) # GEEP
# # DEBUG
# print "AFTER WRITING ENTIRE MESSAGE OFFSET IS %d" % chan.position
# # END
# -- INSTANCE GET -------------------------------------
@classmethod
def read(cls, chan, parent_spec):
"""msg refers to the msg, n is field number; returns msg, n"""
(p_type, nnn) = read_field_hdr(chan)
if nnn < 0 or nnn >= len(parent_spec.msgs):
raise RuntimeError("msg ID '%s' out of range" % nnn)
msg_spec = parent_spec.msgs[nnn]
msg_len = read_raw_varint(chan)
# DEBUG
print("IMPL_GETTER, P_TYPE %d, MSG/FIELD NBR %d, MSG_LEN %d" % (
p_type, nnn, msg_len))
# END
end = chan.position + msg_len
cls = _make_msg_class(parent_spec, msg_spec) # generated class
field_classes = [] # ???
values = [] # ???
# XXX THIS IS NOT GOING TO WORK, BECAUSE WE NEED TO PEEK XXX
# pylint: disable=no-member
for f_class in cls._field_classes:
#pylint: disable=protected-access
f_quant = f_class._quantifier
field_type = f_class._field_type # a number
#pylint: disable=protected-access
f_quant = f_class._quantifier
field_nbr = f_class._field_nbr
# read the field header
(p_type, nbr) = read_field_hdr(chan)
# DEBUG
print(
" GET_FROM_CHAN, FIELD %u, TYPE %u" %
(field_nbr, field_type))
# END
if field_nbr != nbr:
raise RuntimeError(" EXPECTED FIELD_NBR %d, GOT %d" % (
field_nbr, nbr))
# pylint: disable=no-member
if f_quant == Quants.REQUIRED or f_quant == Quants.OPTIONAL:
if field_type > 23:
reg = cls.msg_spec.reg
# BEGIN JUNK ------------------------------------
# DEBUG
print(
"READING: FIELD TYPE IS %s" %
reg.reg_id2name(field_type))
# END
entry = reg.reg_id2entry(field_type)
print("READING: FIELD TYPE bis IS %s" % entry.name)
# END JUNK --------------------------------------
# child_spec = entry.msg_spec
# child_class = _make_msg_class(msg_spec, child_spec)
# RECURSE: read(childCls, chan, msgSpec)
# msgSpec is parentSpec here
value = T_GET_FUNCS[field_type](chan) # XXX WRONG
else:
value = T_GET_FUNCS[field_type](chan)
_check_position(chan, end)
values.append(value)
elif f_quant == Quants.PLUS or f_quant == Quants.STAR:
# v_list = [] # we are reading a list of values
# WORKING HERE
pass
else:
raise RuntimeError("unknown quantifier, index '%u'" % f_quant)
# DEBUG
print("AFTER COLLECTING %u FIELDS, OFFSET IS %u" % (
len(field_classes), chan.position))
# END
# XXX BLOWS UP: can't handle Quants.PLUS or Quants.STAR (about line
# 407)
return (cls(values), nnn) # GEEP
# -- INSTANCE SERIALIZED LENGTH -----------------------
def _wire_len(self):
"""
Returns the length of the body of a serialized message, excluding
the header.
"""
msg_len = 0
nnn = 0 # DEBUG
for field in self._field_classes:
f_name = field.fname
f_nbr = field.field_nbr
f_quant = field.quantifier # NEXT HURDLE
field_type = field.field_type
value = field.value
# XXX What follows doesn't quite make sense. If a REQUIRED
# message is missing, we simply won't find it. Likewise
# for Quants.STAR
# pylint: disable=no-member
if f_quant == Quants.REQUIRED or f_quant == Quants.OPTIONAL:
contrib = T_LEN_FUNCS[field_type](value, f_nbr)
# DEBUG
if field_type > 23:
reg = self.msg_spec.reg # or protocol reg?
# XXX is the registry for the protocol? msgSpec?
print(" F_TYPE %u IS MSG | |
# Source repository: markortleb/asset_data_analysis
from datetime import datetime, timedelta
from pymongo import MongoClient
import requests
import argparse
import json
import yaml
import os
import pandas as pd
import math
import matplotlib.pyplot as plt
import matplotlib
import mplfinance as mpl
def get_daily_candles_from_minute_data(minute_data_df):
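"""Aggregate one trading day of minute bars into a single daily OHLCV candle (09:30-15:59).
Market-hours fields (marketOpen/marketHigh/marketLow/marketClose/marketVolume) are preferred;
the consolidated open/high/low/close/volume fields are used when the market values are missing.
"""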
candle_values = {
'candle_1': {'lower_limit_date': '09:30', 'upper_limit_date': '15:59', 'found_hard_open': False, 'found_hard_close': False}
}
hourly_data_df = pd.DataFrame(
columns=[
'High',
'Low',
'Open',
'Close',
'Volume'
]
)
# Load Daily Candle from Minute Candles
for df_row in minute_data_df.iterrows():
df_row_dict = df_row[1].to_dict()
if 'error' in df_row_dict:
if df_row_dict['error'] == 'HOLIDAY':
continue
# print(type(df_row_dict['marketVolume']))
# print(df_row_dict)
for k, v in candle_values.items():
if v['lower_limit_date'] <= df_row_dict['minute'] <= v['upper_limit_date']:
if df_row_dict['minute'] == v['lower_limit_date']:
v['found_hard_open'] = True
v['open_time'] = df_row_dict['minute']
if 'marketOpen'in df_row_dict and not math.isnan(df_row_dict['marketOpen']):
v['Open'] = df_row_dict['marketOpen']
else:
v['Open'] = df_row_dict['open']
elif not v['found_hard_open']:
if 'Open' not in v:
v['open_time'] = df_row_dict['minute']
if 'marketOpen'in df_row_dict and not math.isnan(df_row_dict['marketOpen']):
v['Open'] = df_row_dict['marketOpen']
else:
v['Open'] = df_row_dict['open']
elif df_row_dict['minute'] > v['open_time']:
v['open_time'] = df_row_dict['minute']
if 'marketOpen'in df_row_dict and not math.isnan(df_row_dict['marketOpen']):
v['Open'] = df_row_dict['marketOpen']
else:
v['Open'] = df_row_dict['open']
if df_row_dict['minute'] == v['upper_limit_date']:
v['found_hard_close'] = True
v['close_time'] = df_row_dict['minute']
if 'marketClose'in df_row_dict and not math.isnan(df_row_dict['marketClose']):
v['Close'] = df_row_dict['marketClose']
else:
v['Close'] = df_row_dict['close']
elif not v['found_hard_close']:
if 'Close' not in v:
v['close_time'] = df_row_dict['minute']
if 'marketClose'in df_row_dict and not math.isnan(df_row_dict['marketClose']):
v['Close'] = df_row_dict['marketClose']
else:
v['Close'] = df_row_dict['close']
elif df_row_dict['minute'] > v['close_time']:
v['close_time'] = df_row_dict['minute']
if 'marketClose'in df_row_dict and not math.isnan(df_row_dict['marketClose']):
v['Close'] = df_row_dict['marketClose']
else:
v['Close'] = df_row_dict['close']
if 'Low' not in v:
if 'marketLow'in df_row_dict and not math.isnan(df_row_dict['marketLow']):
v['Low'] = df_row_dict['marketLow']
else:
v['Low'] = df_row_dict['low']
else:
if 'marketLow'in df_row_dict and not math.isnan(df_row_dict['marketLow']):
if df_row_dict['marketLow'] < v['Low']:
v['Low'] = df_row_dict['marketLow']
else:
if df_row_dict['low'] < v['Low']:
v['Low'] = df_row_dict['low']
if 'High' not in v:
if 'marketHigh'in df_row_dict and not math.isnan(df_row_dict['marketHigh']):
v['High'] = df_row_dict['marketHigh']
else:
v['High'] = df_row_dict['high']
else:
if 'marketHigh'in df_row_dict and not math.isnan(df_row_dict['marketHigh']):
if df_row_dict['marketHigh'] > v['High']:
v['High'] = df_row_dict['marketHigh']
else:
if df_row_dict['high'] > v['High']:
v['High'] = df_row_dict['high']
if 'Volume' not in v:
if 'marketVolume'in df_row_dict and not math.isnan(df_row_dict['marketVolume']):
v['Volume'] = df_row_dict['marketVolume']
else:
v['Volume'] = df_row_dict['volume']
else:
if 'marketVolume'in df_row_dict and not math.isnan(df_row_dict['marketVolume']):
v['Volume'] = v['Volume'] + df_row_dict['marketVolume']
else:
v['Volume'] = v['Volume'] + df_row_dict['volume']
v['Date'] = df_row_dict['date']
for k, v in candle_values.items():
if 'Date' in v:
timestamp_val = datetime.strptime(v['Date'] + ' ' + v['lower_limit_date'] + ':00', '%Y-%m-%d %H:%M:%S')
# timestamp_val = v['Date'] + ' ' + v['lower_limit_date'] + ':00'
appendage = pd.Series({
'Open': v['Open'],
'Close': v['Close'],
'High': v['High'],
'Low': v['Low'],
'Volume': v['Volume']
})
# print(appendage)
hourly_data_df.loc[timestamp_val] = appendage
hourly_data_df['Date'] = hourly_data_df.index
# hourly_data_df.append(appendage)
# hourly_data_df.index.name = 'Date'
return hourly_data_df
def get_thirty_minute_candles_from_minute_data(minute_data_df):
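"""Aggregate minute bars into thirteen 30-minute OHLCV candles covering 09:30-15:59.
Uses the same field preference as the daily aggregation: market-hours values when available,
consolidated values otherwise.
"""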
candle_values = {
'candle_1': {'lower_limit_date': '09:30', 'upper_limit_date': '09:59', 'found_hard_open': False, 'found_hard_close': False},
'candle_2': {'lower_limit_date': '10:00', 'upper_limit_date': '10:29', 'found_hard_open': False, 'found_hard_close': False},
'candle_3': {'lower_limit_date': '10:30', 'upper_limit_date': '10:59', 'found_hard_open': False, 'found_hard_close': False},
'candle_4': {'lower_limit_date': '11:00', 'upper_limit_date': '11:29', 'found_hard_open': False, 'found_hard_close': False},
'candle_5': {'lower_limit_date': '11:30', 'upper_limit_date': '11:59', 'found_hard_open': False, 'found_hard_close': False},
'candle_6': {'lower_limit_date': '12:00', 'upper_limit_date': '12:29', 'found_hard_open': False, 'found_hard_close': False},
'candle_7': {'lower_limit_date': '12:30', 'upper_limit_date': '12:59', 'found_hard_open': False, 'found_hard_close': False},
'candle_8': {'lower_limit_date': '13:00', 'upper_limit_date': '13:29', 'found_hard_open': False, 'found_hard_close': False},
'candle_9': {'lower_limit_date': '13:30', 'upper_limit_date': '13:59', 'found_hard_open': False, 'found_hard_close': False},
'candle_10': {'lower_limit_date': '14:00', 'upper_limit_date': '14:29', 'found_hard_open': False, 'found_hard_close': False},
'candle_11': {'lower_limit_date': '14:30', 'upper_limit_date': '14:59', 'found_hard_open': False, 'found_hard_close': False},
'candle_12': {'lower_limit_date': '15:00', 'upper_limit_date': '15:29', 'found_hard_open': False, 'found_hard_close': False},
'candle_13': {'lower_limit_date': '15:30', 'upper_limit_date': '15:59', 'found_hard_open': False, 'found_hard_close': False}
}
hourly_data_df = pd.DataFrame(
columns=[
'High',
'Low',
'Open',
'Close',
'Volume'
]
)
# Load Thirty Minute Candles from Minute Candles
for df_row in minute_data_df.iterrows():
df_row_dict = df_row[1].to_dict()
if 'error' in df_row_dict:
if df_row_dict['error'] == 'HOLIDAY':
continue
# print(type(df_row_dict['marketVolume']))
# print(df_row_dict)
for k, v in candle_values.items():
if v['lower_limit_date'] <= df_row_dict['minute'] <= v['upper_limit_date']:
if df_row_dict['minute'] == v['lower_limit_date']:
v['found_hard_open'] = True
v['open_time'] = df_row_dict['minute']
if 'marketOpen'in df_row_dict and not math.isnan(df_row_dict['marketOpen']):
v['Open'] = df_row_dict['marketOpen']
else:
v['Open'] = df_row_dict['open']
elif not v['found_hard_open']:
if 'Open' not in v:
v['open_time'] = df_row_dict['minute']
if 'marketOpen'in df_row_dict and not math.isnan(df_row_dict['marketOpen']):
v['Open'] = df_row_dict['marketOpen']
else:
v['Open'] = df_row_dict['open']
elif df_row_dict['minute'] > v['open_time']:
v['open_time'] = df_row_dict['minute']
if 'marketOpen'in df_row_dict and not math.isnan(df_row_dict['marketOpen']):
v['Open'] = df_row_dict['marketOpen']
else:
v['Open'] = df_row_dict['open']
if df_row_dict['minute'] == v['upper_limit_date']:
v['found_hard_close'] = True
v['close_time'] = df_row_dict['minute']
if 'marketClose'in df_row_dict and not math.isnan(df_row_dict['marketClose']):
v['Close'] = df_row_dict['marketClose']
else:
v['Close'] = df_row_dict['close']
elif not v['found_hard_close']:
if 'Close' not in v:
v['close_time'] = df_row_dict['minute']
if 'marketClose'in df_row_dict and not math.isnan(df_row_dict['marketClose']):
v['Close'] = df_row_dict['marketClose']
else:
v['Close'] = df_row_dict['close']
elif df_row_dict['minute'] > v['close_time']:
v['close_time'] = df_row_dict['minute']
if 'marketClose'in df_row_dict and not math.isnan(df_row_dict['marketClose']):
v['Close'] = df_row_dict['marketClose']
else:
v['Close'] = df_row_dict['close']
if 'Low' not in v:
if 'marketLow'in df_row_dict and not math.isnan(df_row_dict['marketLow']):
v['Low'] = df_row_dict['marketLow']
else:
v['Low'] = df_row_dict['low']
else:
if 'marketLow'in df_row_dict and not math.isnan(df_row_dict['marketLow']):
if df_row_dict['marketLow'] < v['Low']:
v['Low'] = df_row_dict['marketLow']
else:
if df_row_dict['low'] < v['Low']:
v['Low'] = df_row_dict['low']
if 'High' not in v:
if 'marketHigh'in df_row_dict and not math.isnan(df_row_dict['marketHigh']):
v['High'] = df_row_dict['marketHigh']
else:
v['High'] = df_row_dict['high']
else:
if 'marketHigh'in df_row_dict and not math.isnan(df_row_dict['marketHigh']):
if df_row_dict['marketHigh'] > v['High']:
v['High'] = df_row_dict['marketHigh']
else:
if df_row_dict['high'] > v['High']:
v['High'] = df_row_dict['high']
if 'Volume' not in v:
if 'marketVolume'in df_row_dict and not math.isnan(df_row_dict['marketVolume']):
v['Volume'] = df_row_dict['marketVolume']
else:
v['Volume'] = df_row_dict['volume']
else:
if 'marketVolume'in df_row_dict and not math.isnan(df_row_dict['marketVolume']):
v['Volume'] = v['Volume'] + df_row_dict['marketVolume']
else:
v['Volume'] = v['Volume'] + df_row_dict['volume']
v['Date'] = df_row_dict['date']
for k, v in candle_values.items():
if 'Date' in v:
timestamp_val = datetime.strptime(v['Date'] + ' ' + v['lower_limit_date'] + ':00', '%Y-%m-%d %H:%M:%S')
# timestamp_val = v['Date'] + ' ' + v['lower_limit_date'] + ':00'
appendage = pd.Series({
'Open': v['Open'],
'Close': v['Close'],
'High': v['High'],
'Low': v['Low'],
'Volume': v['Volume']
})
# print(appendage)
hourly_data_df.loc[timestamp_val] = appendage
hourly_data_df['Date'] = hourly_data_df.index
# hourly_data_df.append(appendage)
# hourly_data_df.index.name = 'Date'
return hourly_data_df
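# Note (illustrative, not from the original source): the per-minute documents
# consumed by the aggregation loop above and by the Mongo loaders below are
# assumed to look roughly like the following IEX-style record. Field names are
# taken from the keys the code reads ('date', 'minute', 'open', 'marketOpen',
# ...); the values are hypothetical examples.
#   {'date': '2021-03-01', 'minute': '12:04',
#    'open': 101.2, 'high': 101.5, 'low': 101.0, 'close': 101.3, 'volume': 1200,
#    'marketOpen': 101.21, 'marketHigh': 101.52, 'marketLow': 101.01,
#    'marketClose': 101.31, 'marketVolume': 15400}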
def get_daily_df(ticker_symbol):
database = 'asset_data_lake'
collection_name = f'asset_stock_{ticker_symbol.lower()}_minutely'
mongo_client = MongoClient('localhost', 27017)
asset_data_lake = mongo_client[database]
collection = asset_data_lake[collection_name]
current_date = datetime.now()
    # Look back 20 days from today to bound the scan window.
    start_date = current_date - timedelta(days=20)
    iter_date = start_date
out_data_list = []
while iter_date <= current_date:
collection_cursor = collection.find(
{
'date': {
'$eq': iter_date.strftime('%Y-%m-%d')
}
}
)
collection_df = pd.DataFrame(list(collection_cursor))
if not collection_df.empty and 'error' not in collection_df.columns:
collection_df = remove_nans(collection_df)
iter_ticker_df = get_daily_candles_from_minute_data(collection_df)
thirty_minute_candles = iter_ticker_df.to_dict('records')
out_data_list = out_data_list + thirty_minute_candles
iter_date = iter_date + timedelta(days=1)
ticker_df = pd.DataFrame.from_dict(out_data_list)
ticker_df.index.name = 'Date'
ticker_df.index = ticker_df['Date']
del ticker_df['Date']
return ticker_df
def get_thirty_minute_df(ticker_symbol):
database = 'asset_data_lake'
collection_name = f'asset_stock_{ticker_symbol.lower()}_minutely'
mongo_client = MongoClient('localhost', 27017)
asset_data_lake = mongo_client[database]
collection = asset_data_lake[collection_name]
current_date = datetime.now()
    # Look back 10 days from today to bound the scan window.
    start_date = current_date - timedelta(days=10)
    iter_date = start_date
out_data_list = []
while iter_date <= current_date:
collection_cursor = collection.find(
{
'date': {
'$eq': iter_date.strftime('%Y-%m-%d')
}
}
)
collection_df = pd.DataFrame(list(collection_cursor))
if not collection_df.empty and 'error' not in collection_df.columns:
collection_df = remove_nans(collection_df)
iter_ticker_df = get_thirty_minute_candles_from_minute_data(collection_df)
thirty_minute_candles = iter_ticker_df.to_dict('records')
out_data_list = out_data_list + thirty_minute_candles
iter_date = iter_date + timedelta(days=1)
ticker_df = pd.DataFrame.from_dict(out_data_list)
ticker_df.index.name = 'Date'
ticker_df.index = ticker_df['Date']
del ticker_df['Date']
return ticker_df
def calculate_fibonacci_pivot_points(intraday_ticker_df, daily_ticker_df):
pivot_points = []
for index, row in daily_ticker_df.iterrows():
pivot_point = (row['High'] + row['Low'] + row['Close']) / 3.0
pivot_points.append(
{
'date': index,
'resistance_3': pivot_point + ((row['High'] - row['Low']) * 1.0),
'resistance_2': pivot_point + ((row['High'] - row['Low']) * 0.618),
'resistance_1': pivot_point + ((row['High'] - row['Low']) * 0.382),
'pivot_point': pivot_point,
'support_1': pivot_point - ((row['High'] - row['Low']) * 0.382),
'support_2': pivot_point - ((row['High'] - row['Low']) * 0.618),
'support_3': pivot_point - ((row['High'] - row['Low']) * 1.0)
}
)
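        # Worked example (illustrative values only): with High=110, Low=100 and
        # Close=106 the pivot is (110 + 100 + 106) / 3 = 105.33; the range is
        # 110 - 100 = 10, so resistance_1 = 105.33 + 0.382 * 10 = 109.15,
        # resistance_2 = 105.33 + 0.618 * 10 = 111.51, resistance_3 = 115.33,
        # and the supports mirror them below the pivot (101.51, 99.15, 95.33).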
date_list = list(set(daily_ticker_df.index.tolist()))
date_list.sort(reverse=False)
date_increment_list = []
for i in range(len(date_list)):
if i + 1 != len(date_list):
date_increment_list.append(
{
'cur_date': date_list[i].strftime('%Y-%m-%d'),
'next_date': date_list[i + 1].strftime('%Y-%m-%d')
}
)
else:
# Just use next day otherwise
date_increment_list.append(
{
'cur_date': date_list[i].strftime('%Y-%m-%d'),
'next_date': (date_list[i] + timedelta(days=1)).strftime('%Y-%m-%d')
}
)
last_row_df = intraday_ticker_df.iloc[-1]
last_close = float(last_row_df['Close'])
last_volume = float(last_row_df['Volume'])
padded_intraday_dicts = []
intraday_times = list(set(intraday_ticker_df.index.tolist()))
applied_pivot_points = []
for index, row in intraday_ticker_df.iterrows():
for pivot_point_data in pivot_points:
if pivot_point_data['date'].strftime('%Y-%m-%d') == index.strftime('%Y-%m-%d'):
new_date = None
for increment_map in date_increment_list:
if increment_map['cur_date'] == index.strftime('%Y-%m-%d'):
new_date = datetime.strptime(increment_map['next_date'] + ' ' + index.strftime('%H:%M:%S'), '%Y-%m-%d %H:%M:%S')
applied_pivot_points.append(
{
'Date': new_date,
'resistance_3': pivot_point_data['resistance_3'],
'resistance_2': pivot_point_data['resistance_2'],
'resistance_1': pivot_point_data['resistance_1'],
'pivot_point': pivot_point_data['pivot_point'],
'support_1': pivot_point_data['support_1'],
'support_2': pivot_point_data['support_2'],
'support_3': pivot_point_data['support_3'],
}
)
if new_date not in intraday_times:
padded_intraday_dicts.append(
{
'Date': new_date,
'High': last_close,
#!/usr/bin/env python
#
# Copyright 2014 cloudysunny14.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from ryu.base import app_manager
from ryu.exception import OFPUnknownVersion
from ryu.exception import RyuException
from ryu.lib import ofctl_v1_0
from ryu.lib import ofctl_v1_2
from ryu.lib import ofctl_v1_3
from ryu.lib.ovs import bridge
from ryu.controller import ofp_event
from ryu.controller.handler import set_ev_cls
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.ofproto import ofproto_v1_0
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ofproto_v1_3_parser
from ryu.ofproto import ether
from ryu.ofproto import inet
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ipv4
from ryu.lib.of_config.capable_switch import OFCapableSwitch
import ryu.lib.of_config.classes as ofc
IPV4 = ipv4.ipv4.__name__
OUTPUT_CONTROLLER = 0xfffffffd
ARP_FLOW_PRIORITY = ofproto_v1_3_parser.UINT16_MAX - 1
OUTPUT_FLOW_PRIORITY = ofproto_v1_3_parser.UINT16_MAX >> 1
LISTED_FLOW_PRIORITY = ofproto_v1_3_parser.UINT16_MAX >> 2
# Built-in chain
# [INPUT] - [OUTPUT] - [POST-ROUTING]
MANGLE_INPUT_TABLE_ID = 0
MANGLE_PRE_ROUTING_TABLE_ID = 1
MANGLE_OUTPUT_TABLE_ID = 2
MANGLE_POST_ROUTING_TABLE_ID = 3
MANGLE_DEFAULT_PRIORITY = 0
MANGLE_DEFAULT_COOKIE = 0
MANGLE_DEFAULT_COOKIE_MASK = 0xFFFFFFFF
MANGLE_DEFAULT_TIMEOUT = 0
MANGLE_ACTION = 'action'
MANGLE_ACTION_ACCEPT = 'accept'
MANGLE_ACTION_DENY = 'deny'
MANGLE_ACTION_CONTROLLER = 'controller'
MANGLE_ACTION_MARK_PACKET = 'mark-packet'
MANGLE_JUMP = 'jump'
MANGLE_QUEUE = 'queue'
MANGLE_PRIORITY = 'priority'
MANGLE_JUMP_TARGET = 'jump-target'
MANGLE_DST_ADDRESS = 'dst-address'
MANGLE_SRC_ADDRESS = 'src-address'
MANGLE_PROTOCOL = 'protocol'
MANGLE_LIMIT = 'limit'
MANGLE_NEW_PACKET_MARK = 'new-packet-mark'
MANGLE_DST_ADDRESS_TYPE = 'dst-address-type'
MANGLE_SRC_ADDRESS_TYPE = 'src-address-type'
MANGLE_PACKET_MARK = 'packet-mark'
MANGLE_DST_MAC_ADDRESS = 'dst-mac-address'
MANGLE_SRC_MAC_ADDRESS = 'src-mac-address'
MANGLE_DST_PORT = 'dst-port'
MANGLE_CHAIN = 'chain'
MANGLE_CHAIN_INPUT = 'input'
MANGLE_CHAIN_OUTPUT = 'output'
MANGLE_CHAIN_POST_ROUTING = 'post-routing'
MANGLE_NW_PROTO_ICMP = 'icmp'
MANGLE_NW_PROTO_TCP = 'tcp'
MANGLE_NW_PROTO_UDP = 'udp'
# TODO: Compatible with VLAN_ID
# Cookie mask format
# (LSB) (MSB)
# +-------------+---------+---------+
# | 0 | 1 ~ 12 | 13 ~ 19 | 20 ~ 32 |
# +---+---------+---------+---------+
# |src| | | |
# |or | vlan_id | chain | list |
# |dst| | | |
# +---+---------+---------+---------+
# Note:src or dst bit is only uses add-address-list action,
# default value is 0.
MANGLE_ADDRESS_LIST_COOKIE_MASK = 0xFFF00000
MANGLE_CHAIN_COOKIE_MASK = 0xFE000
MANGLE_VLAN_ID_COOKIE_MASK = 0x1FFE
MANGLE_SD_COOKIE_MASK = 0x1
MANGLE_ADDRESS_LIST_SHIFT = 20
MANGLE_CHAIN_LIST_SHIFT = 13
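# Illustrative sketch (not part of the original module): how a cookie value is
# meant to be assembled from the fields documented in the layout comment above.
# The helper name and the sample field values are hypothetical; only the
# masks and shifts are taken from this module.
def _example_compose_cookie(list_id, chain_id, vlan_id, is_src=0):
    """Pack list id, chain id, vlan id and the src/dst bit into one cookie."""
    return (((list_id << MANGLE_ADDRESS_LIST_SHIFT) & MANGLE_ADDRESS_LIST_COOKIE_MASK) |
            ((chain_id << MANGLE_CHAIN_LIST_SHIFT) & MANGLE_CHAIN_COOKIE_MASK) |
            ((vlan_id << 1) & MANGLE_VLAN_ID_COOKIE_MASK) |
            (is_src & MANGLE_SD_COOKIE_MASK))
# For example, _example_compose_cookie(list_id=1, chain_id=2, vlan_id=10)
# yields 0x100000 | 0x4000 | 0x14 = 0x104014.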
LOG = logging.getLogger(__name__)
class QoSLib(app_manager.RyuApp):
""" Simple QoS library """
def __init__(self):
"""initialization."""
super(QoSLib, self).__init__()
self.name = 'qoslib'
#{datapath_id: switch}
self.switches = {}
#{datapath_id: {list_name: cookie}}
self.lists = {}
self.current_table_id = MANGLE_INPUT_TABLE_ID
self.waiters = {}
self.use_switch_flow = True
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
datapath = ev.msg.datapath
switch = self.switches.get(datapath.id)
if switch is not None:
switch.packet_in_handler(ev)
else:
switch = _Switch(datapath, self.waiters)
self.switches[datapath.id] = switch
def stats_reply_handler(self, ev):
msg = ev.msg
dp = msg.datapath
if dp.id not in self.waiters:
return
if msg.xid not in self.waiters[dp.id]:
return
lock, msgs = self.waiters[dp.id][msg.xid]
msgs.append(msg)
flags = 0
if dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION or \
dp.ofproto.OFP_VERSION == ofproto_v1_2.OFP_VERSION:
flags = dp.ofproto.OFPSF_REPLY_MORE
elif dp.ofproto.OFP_VERSION == ofproto_v1_3.OFP_VERSION:
flags = dp.ofproto.OFPMPF_REPLY_MORE
if msg.flags & flags:
return
del self.waiters[dp.id][msg.xid]
lock.set()
# for OpenFlow version1.0
@set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)
def stats_reply_handler_v1_0(self, ev):
self.stats_reply_handler(ev)
# for OpenFlow version1.2 or later
@set_ev_cls(ofp_event.EventOFPStatsReply, MAIN_DISPATCHER)
def stats_reply_handler_v1_2(self, ev):
self.stats_reply_handler(ev)
@staticmethod
def mangle(datapath):
mangle = _Mangle(datapath)
return mangle
@staticmethod
def queue_tree(peer, datapath):
if isinstance(peer, OFCapableSwitch):
queue_tree = _QueueTreeOFConfig(peer, datapath)
else:
queue_tree = _QueueTreeOVSBridge(peer, datapath)
return queue_tree
def get_switch(self, datapath):
switch = self.switches.get(datapath.id, None)
if switch is None:
switch = _Switch(datapath, self.waiters)
self.switches[datapath.id] = switch
if self.use_switch_flow:
switch.set_arp_flow()
return switch
def add_mangle(self, mangle):
if mangle.is_built:
raise MangleAlreadyBuildError(mangle=mangle)
datapath = mangle.dp
switch = self.get_switch(datapath)
mangle.build(self.waiters, switch)
properties = mangle.properties
cookie_mask = MANGLE_DEFAULT_COOKIE_MASK
priority = properties.get(MANGLE_PRIORITY,
MANGLE_DEFAULT_PRIORITY)
hard_timeout = MANGLE_DEFAULT_TIMEOUT
action = _Action(mangle, switch)
matches = _Match(mangle, switch).to_openflow()
table_id = mangle.table_id
cookie = mangle.cookie
for match, add_next_table in matches:
actions = action.to_openflow()
if add_next_table:
goto_table_action = action.get_next_table()
if goto_table_action is not None:
actions.append(goto_table_action)
flow = self._to_of_flow(table_id, cookie, cookie_mask,
priority, match, actions,
hard_timeout)
mangle.send_flow_mod(flow)
if MANGLE_LIMIT in properties:
pass
def _to_of_flow(self, table_id, cookie, cookie_mask, priority,
match, actions, hard_timeout):
flow = {'cookie': cookie,
'cookie_mask': cookie_mask,
'priority': priority,
'table_id': table_id,
'flags': 0,
'idle_timeout': 0,
'hard_timeout': hard_timeout,
'match': match,
'actions': actions}
return flow
def register_queue(self, queue):
""""""
switch = self.get_switch(queue.datapath)
queue.edit_config(switch)
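# Minimal usage sketch (illustrative, not from the original source). It only
# uses names defined in this module; the datapath object and the property
# values are hypothetical:
#
#   qoslib = ...                      # the QoSLib instance registered with Ryu
#   mangle = QoSLib.mangle(datapath)
#   mangle.add_property(MANGLE_ACTION, MANGLE_ACTION_MARK_PACKET)
#   mangle.add_property(MANGLE_NEW_PACKET_MARK, 'best-effort')
#   mangle.add_property(MANGLE_DST_ADDRESS, '10.0.0.2')
#   qoslib.add_mangle(mangle)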
class MangleAlreadyBuildError(RyuException):
    message = 'Mangle is already built : mangle=%(mangle)s'
class MangleAlreadyAddedListError(RyuException):
    message = 'Address list is already added : list_name=%(list_name)s, list=%(list)s'
class MangleBuildError(RyuException):
    message = 'Mangle could not be built : msg=%(msg)s mangle=%(mangle)s'
class MangleCommandError(RyuException):
message = 'Mangle command error : msg=%(msg)s'
class MangleInconsistencyError(RyuException):
message = 'Mangle unexpected inconsistency error : msg=%(msg)s'
class _Mangle(object):
def __init__(self, datapath):
self.dp = datapath
self.ofctl = _OFCtl.create_ofctl(datapath)
self.properties = {}
self.is_built = False
self.address_list_dict = {}
self.cookie = MANGLE_DEFAULT_COOKIE
self.table_id = MANGLE_INPUT_TABLE_ID
def add_property(self, p, v):
if self.is_built:
raise MangleAlreadyBuildError(mangle=self.properties)
self.properties[p] = v
return self
def address_list(self, list_name, address_list):
if len(self.address_list_dict):
name_list = self.address_list_dict.keys()[0]
add_list = self.address_list_dict[name_list]
raise MangleAlreadyAddedListError(list_name=name_list,
list=add_list)
self.address_list_dict[list_name] = address_list
def _validate_mangle(self, waiters, switch):
"""Validate mangle entry"""
# Search flow table.etc
mangle_chain = self.properties.get(MANGLE_CHAIN,
MANGLE_CHAIN_INPUT)
self.table_id = switch.chains_to_table_id(mangle_chain)
if MANGLE_ACTION_ACCEPT in self.properties:
table_id = self.properties.get(MANGLE_CHAIN, None)
if table_id == MANGLE_OUTPUT_TABLE_ID:
# Action accept is can't set to output chain.
self.properties[MANGLE_CHAIN] = MANGLE_INPUT_TABLE_ID
pass
if MANGLE_PACKET_MARK in self.properties:
mark = self.properties[MANGLE_PACKET_MARK]
value = switch.get_dscp_value(mark)
self.properties[MANGLE_PACKET_MARK] = value
if MANGLE_ACTION_MARK_PACKET in self.properties:
if MANGLE_NEW_PACKET_MARK not in self.properties:
return False, 'Action mark-packet required to specify\
new-packet-mark property'
return True, ''
def build(self, waiters, switch):
result, msg = self._validate_mangle(waiters, switch)
if not result:
raise MangleBuildError(msg=msg, mangle=self.properties)
self.is_built = True
return self
def send_flow_mod(self, flow):
cmd = self.dp.ofproto.OFPFC_ADD
self.ofctl.mod_flow_entry(self.dp, flow, cmd)
class _Action(object):
""""""
def __init__(self, mangle, switch):
self.mangle = mangle
self.switch = switch
def _get_next_table_id(self):
properties = self.mangle.properties
value = properties[MANGLE_ACTION]
table_id = properties.get(MANGLE_CHAIN,
MANGLE_INPUT_TABLE_ID)
if value == MANGLE_ACTION_ACCEPT:
table_id = MANGLE_POST_ROUTING_TABLE_ID
elif MANGLE_JUMP in properties:
table_name = properties[MANGLE_JUMP]
if table_name == MANGLE_CHAIN_POST_ROUTING:
table_id = MANGLE_POST_ROUTING_TABLE_ID
else:
table_id = self.switch.chains_to_table_id(
properties[MANGLE_JUMP])
elif table_id == MANGLE_CHAIN_INPUT:
table_id = MANGLE_PRE_ROUTING_TABLE_ID
elif table_id == MANGLE_CHAIN_OUTPUT:
table_id = MANGLE_POST_ROUTING_TABLE_ID
return table_id
def get_next_table(self):
goto_table_id = self._get_next_table_id()
if goto_table_id != MANGLE_INPUT_TABLE_ID:
return {'type': 'GOTO_TABLE',
'table_id': goto_table_id}
return None
def to_openflow(self):
properties = self.mangle.properties
if MANGLE_ACTION not in properties:
raise MangleCommandError(msg='Must specify action.')
value = properties[MANGLE_ACTION]
LOG.info("value:%s", value)
actions = []
if value == MANGLE_ACTION_MARK_PACKET:
key = properties[MANGLE_NEW_PACKET_MARK]
value = self.switch.mark_packet(key)
actions.append({'type': 'SET_FIELD',
'field': 'ip_dscp',
'value': value})
if MANGLE_QUEUE in properties:
queue_name = properties[MANGLE_QUEUE]
queue_id = self.switch.get_queue_id(queue_name)
queue_action = {'type': 'SET_QUEUE',
'queue_id': queue_id}
if len(actions) > 0:
actions.append(queue_action)
else:
actions = [queue_action]
return actions
class _Match(object):
""""""
_CONVERT_PREREQ = {MANGLE_DST_ADDRESS:
{'dl_type': ether.ETH_TYPE_IP},
MANGLE_SRC_ADDRESS:
{'dl_type': ether.ETH_TYPE_IP},
MANGLE_PROTOCOL:
{'dl_type': ether.ETH_TYPE_IP},
MANGLE_PACKET_MARK:
{'dl_type': ether.ETH_TYPE_IP},
MANGLE_ACTION_MARK_PACKET:
{'dl_type': ether.ETH_TYPE_IP},
MANGLE_DST_PORT:
{'dl_type': ether.ETH_TYPE_IP,
'nw_proto': inet.IPPROTO_TCP}}
_CONVERT_KEY = {MANGLE_DST_ADDRESS: 'ipv4_dst',
MANGLE_SRC_ADDRESS: 'ipv4_src',
MANGLE_PROTOCOL: 'nw_proto',
MANGLE_PACKET_MARK: 'ip_dscp',
MANGLE_DST_MAC_ADDRESS: 'eth_dst',
MANGLE_SRC_MAC_ADDRESS: 'eth_src',
MANGLE_DST_PORT: 'tp_dst'}
_CONVERT_PROTOCOL = {MANGLE_NW_PROTO_TCP: inet.IPPROTO_TCP,
MANGLE_NW_PROTO_UDP: inet.IPPROTO_UDP,
MANGLE_NW_PROTO_ICMP: inet.IPPROTO_ICMP}
def __init__(self, mangle, switch):
self.mangle = mangle
self.switch = switch
def _validate_match(self, match_property):
        # Placeholder validation: every property set is currently accepted.
        return True
def convert_match(self, match_property):
match = {}
for key, value in match_property.items():
if key in _Match._CONVERT_PREREQ:
prereq = _Match._CONVERT_PREREQ[key]
match.update(prereq)
if key in _Match._CONVERT_KEY:
match_key = _Match._CONVERT_KEY[key]
match_value = \
_Match._CONVERT_PROTOCOL.get(match_property[key],
match_property[key])
match.update({match_key: match_value})
action = match_property.get(MANGLE_ACTION, None)
if action is not None:
if action in _Match._CONVERT_PREREQ:
prereq = _Match._CONVERT_PREREQ[action]
match.update(prereq)
return match
def to_openflow(self):
match_properties = self.mangle.properties
if not self._validate_match(match_properties):
return
matches = []
# In case of address list, not append match
# because match field only required to address lists
matches.append((self.convert_match(match_properties),
True))
return matches
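# Example of the conversion performed by _Match.convert_match (illustrative
# values): a property dict such as
#   {MANGLE_DST_ADDRESS: '10.0.0.2', MANGLE_PROTOCOL: 'tcp'}
# becomes an OpenFlow match dict roughly like
#   {'dl_type': ether.ETH_TYPE_IP, 'ipv4_dst': '10.0.0.2', 'nw_proto': inet.IPPROTO_TCP}
# because each key pulls in its prerequisite fields from _CONVERT_PREREQ and is
# renamed via _CONVERT_KEY / _CONVERT_PROTOCOL.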
class _Switch(object):
""" Switch """
def __init__(self,
datapath,
waiters,
chains={MANGLE_CHAIN_INPUT:
MANGLE_INPUT_TABLE_ID,
MANGLE_CHAIN_OUTPUT:
MANGLE_OUTPUT_TABLE_ID,
MANGLE_CHAIN_POST_ROUTING:
MANGLE_POST_ROUTING_TABLE_ID}):
self.datapath = datapath
self.waiters = waiters
self.ofctl = _OFCtl.create_ofctl(datapath)
self.chains = chains
self.mac_to_port = {}
#{queue_name: {resource_id: queue_id, ...}}
self.queues = {}
#{mark: dscp, ..}
self.dscp_mark_mapping = {}
self.current_dscp = 1
def set_arp_flow(self):
ofproto = self.datapath.ofproto
parser = self.datapath.ofproto_parser
match = parser.OFPMatch(eth_type=ether.ETH_TYPE_ARP)
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
mod = parser.OFPFlowMod(
datapath=self.datapath, priority=ARP_FLOW_PRIORITY,
match=match, instructions=inst)
self.datapath.send_msg(mod)
def add_flow(self, datapath, priority, match, actions):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
mod = parser.OFPFlowMod(datapath=datapath,
table_id=MANGLE_OUTPUT_TABLE_ID,
priority=priority,
match=match, instructions=inst)
datapath.send_msg(mod)
def _send_end_flows(self, datapath, in_port, out_port, eth):
parser = datapath.ofproto_parser
action_dst = [parser.OFPActionOutput(out_port)]
match = parser.OFPMatch(in_port=in_port, eth_dst=eth.dst)
self.add_flow(datapath, OUTPUT_FLOW_PRIORITY, match, action_dst)
match = parser.OFPMatch(in_port=out_port, eth_dst=eth.src)
action_src = [parser.OFPActionOutput(in_port)]
self.add_flow(datapath, OUTPUT_FLOW_PRIORITY, match, action_src)
LOG.info("END_FLOW")
def packet_in_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
in_port = msg.match['in_port']
pkt = packet.Packet(msg.data)
        eth =
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# "Statistical Debugging" - a chapter of "The Debugging Book"
# Web site: https://www.debuggingbook.org/html/StatisticalDebugger.html
# Last change: 2022-01-24 10:47:23+01:00
#
# Copyright (c) 2021 CISPA Helmholtz Center for Information Security
# Copyright (c) 2018-2020 Saarland University, authors, and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
r'''
The Debugging Book - Statistical Debugging
This file can be _executed_ as a script, running all experiments:
$ python StatisticalDebugger.py
or _imported_ as a package, providing classes, functions, and constants:
>>> from debuggingbook.StatisticalDebugger import <identifier>
but before you do so, _read_ it and _interact_ with it at:
https://www.debuggingbook.org/html/StatisticalDebugger.html
This chapter introduces classes and techniques for _statistical debugging_ – that is, correlating specific events, such as lines covered, with passing and failing outcomes.
To make use of the code in this chapter, use one of the provided `StatisticalDebugger` subclasses such as `TarantulaDebugger` or `OchiaiDebugger`.
Both are instantiated with a `Collector` denoting the type of events you want to correlate outcomes with. The default is `CoverageCollector`, which collects line coverage.
### Collecting Events from Calls
To collect events from calls that are labeled manually, use
>>> debugger = TarantulaDebugger()
>>> with debugger.collect_pass():
>>> remove_html_markup("abc")
>>> with debugger.collect_pass():
>>> remove_html_markup('abc')
>>> with debugger.collect_fail():
>>> remove_html_markup('"abc"')
Within each `with` block, the _first function call_ is collected and tracked for coverage. (Note that _only_ the first call is tracked.)
### Collecting Events from Tests
To collect events from _tests_ that use exceptions to indicate failure, use the simpler `with` form:
>>> debugger = TarantulaDebugger()
>>> with debugger:
>>> remove_html_markup("abc")
>>> with debugger:
>>> remove_html_markup('abc')
>>> with debugger:
>>> remove_html_markup('"abc"')
>>> assert False # raise an exception
`with` blocks that raise an exception will be classified as failing, blocks that do not will be classified as passing. Note that exceptions raised are "swallowed" by the debugger.
### Visualizing Events as a Table
After collecting events, you can print out the observed events – in this case, line numbers – in a table, showing in which runs they occurred (`X`), and with colors highlighting the suspiciousness of the event. A "red" event means that the event predominantly occurs in failing runs.
>>> debugger.event_table(args=True, color=True)
| `remove_html_markup` | `s='abc'` | `s='abc'` | `s='"abc"'` |
| --------------------- | ---- | ---- | ---- |
| remove_html_markup:1 | X | X | X |
| remove_html_markup:2 | X | X | X |
| remove_html_markup:3 | X | X | X |
| remove_html_markup:4 | X | X | X |
| remove_html_markup:6 | X | X | X |
| remove_html_markup:7 | X | X | X |
| remove_html_markup:8 | - | X | - |
| remove_html_markup:9 | X | X | X |
| remove_html_markup:10 | - | X | - |
| remove_html_markup:11 | X | X | X |
| remove_html_markup:12 | - | - | X |
| remove_html_markup:13 | X | X | X |
| remove_html_markup:14 | X | X | X |
| remove_html_markup:16 | X | X | X |
### Visualizing Suspicious Code
If you collected coverage with `CoverageCollector`, you can also visualize the code with similar colors, highlighting suspicious lines:
>>> debugger
1 def remove_html_markup(s): # type: ignore
2 tag = False
3 quote = False
4 out = ""
5
6 for c in s:
7 if c == '<' and not quote:
8 tag = True
9 elif c == '>' and not quote:
10 tag = False
11 elif c == '"' or c == "'" and tag:
12 quote = not quote
13 elif not tag:
14 out = out + c
15
16 return out
### Ranking Events
The method `rank()` returns a ranked list of events, starting with the most suspicious. This is useful for automated techniques that need potential defect locations.
>>> debugger.rank()
[('remove_html_markup', 12),
('remove_html_markup', 2),
('remove_html_markup', 14),
('remove_html_markup', 11),
('remove_html_markup', 3),
('remove_html_markup', 9),
('remove_html_markup', 6),
('remove_html_markup', 1),
('remove_html_markup', 7),
('remove_html_markup', 4),
('remove_html_markup', 16),
('remove_html_markup', 13),
('remove_html_markup', 8),
('remove_html_markup', 10)]
### Classes and Methods
Here are all classes defined in this chapter:

For more details, source, and documentation, see
"The Debugging Book - Statistical Debugging"
at https://www.debuggingbook.org/html/StatisticalDebugger.html
'''
# Allow to use 'from . import <module>' when run as script (cf. PEP 366)
if __name__ == '__main__' and __package__ is None:
__package__ = 'debuggingbook'
# Statistical Debugging
# =====================
if __name__ == '__main__':
print('# Statistical Debugging')
if __name__ == '__main__':
from .bookutils import YouTubeVideo
YouTubeVideo("UNuso00zYiI")
if __name__ == '__main__':
# We use the same fixed seed as the notebook to ensure consistency
import random
random.seed(2001)
## Synopsis
## --------
if __name__ == '__main__':
print('\n## Synopsis')
## Introduction
## ------------
if __name__ == '__main__':
print('\n## Introduction')
## Collecting Events
## -----------------
if __name__ == '__main__':
print('\n## Collecting Events')
from .Tracer import Tracer
from typing import Any, Callable, Optional, Type, Tuple
from typing import Dict, Set, List, TypeVar, Union
from types import FrameType, TracebackType
class Collector(Tracer):
"""A class to record events during execution."""
def collect(self, frame: FrameType, event: str, arg: Any) -> None:
"""Collecting function. To be overridden in subclasses."""
pass
def events(self) -> Set:
"""Return a collection of events. To be overridden in subclasses."""
return set()
def traceit(self, frame: FrameType, event: str, arg: Any) -> None:
self.collect(frame, event, arg)
def remove_html_markup(s): # type: ignore
tag = False
quote = False
out = ""
for c in s:
if c == '<' and not quote:
tag = True
elif c == '>' and not quote:
tag = False
elif c == '"' or c == "'" and tag:
quote = not quote
elif not tag:
out = out + c
return out
if __name__ == '__main__':
with Collector() as c:
out = remove_html_markup('"abc"')
out
Coverage = Set[Tuple[Callable, int]]
class Collector(Collector):
def __init__(self) -> None:
"""Constructor."""
self._function: Optional[Callable] = None
self._args: Optional[Dict[str, Any]] = None
self._argstring: Optional[str] = None
self._exception: Optional[Type] = None
self.items_to_ignore: List[Union[Type, Callable]] = [self.__class__]
def traceit(self, frame: FrameType, event: str, arg: Any) -> None:
"""
Tracing function.
Saves the first function and calls collect().
"""
for item in self.items_to_ignore:
if (isinstance(item, type) and 'self' in frame.f_locals and
isinstance(frame.f_locals['self'], item)):
# Ignore this class
return
if item.__name__ == frame.f_code.co_name:
# Ignore this function
return
if self._function is None and event == 'call':
# Save function
self._function = self.create_function(frame)
self._args = frame.f_locals.copy()
self._argstring = ", ".join([f"{var}={repr(self._args[var])}"
for var in self._args])
self.collect(frame, event, arg)
def collect(self, frame: FrameType, event: str, arg: Any) -> None:
"""Collector function. To be overloaded in subclasses."""
pass
def id(self) -> str:
"""Return an identifier for the collector,
created from the first call"""
return f"{self.function().__name__}({self.argstring()})"
def function(self) -> Callable:
"""Return the function from the first call, as a function object"""
if not self._function:
raise ValueError("No call collected")
return self._function
def argstring(self) -> str:
"""
Return the list of arguments from the first call,
as a printable string
"""
if not self._argstring:
raise ValueError("No call collected")
return self._argstring
def args(self) -> Dict[str, Any]:
"""Return a dict of argument names and values from the first call"""
if not self._args:
raise ValueError("No call collected")
return self._args
def exception(self) -> Optional[Type]:
"""Return the exception class from the first call,
or None if no exception was raised."""
return self._exception
def __repr__(self) -> str:
"""Return a string representation of | |
# -*- coding: utf-8 -*-
# Copyright 2014, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from datetime import datetime
from operator import or_
from os import listdir
from os.path import join, isfile
import zipfile
import StringIO
import salt.cloud
import envoy
import yaml
from django.shortcuts import get_object_or_404
from django.http import HttpResponse
from rest_framework import (
generics,
parsers,
permissions,
status,
)
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.reverse import reverse
from rest_framework.decorators import api_view
from django.conf import settings
from core.exceptions import BadRequest
from core.renderers import PlainTextRenderer
from core.permissions import (
AdminOrOwnerPermission,
AdminOrOwnerOrPublicPermission,
)
from volumes.api import VolumeListAPIView
from volumes.models import Volume
from blueprints.models import Blueprint, BlueprintHostDefinition
from cloud.providers.base import BaseCloudProvider
from cloud.models import SecurityGroup
from cloud.filters import SecurityGroupFilter
from . import tasks, models, serializers, filters, validators, workflows
logger = logging.getLogger(__name__)
class PublicStackMixin(object):
permission_classes = (permissions.IsAuthenticated,
AdminOrOwnerOrPublicPermission,)
def get_object(self):
obj = get_object_or_404(models.Stack, id=self.kwargs.get('pk'))
self.check_object_permissions(self.request, obj)
return obj
class StackPublicListAPIView(generics.ListAPIView):
model = models.Stack
serializer_class = serializers.StackSerializer
filter_class = filters.StackFilter
def get_queryset(self):
return self.model.objects \
.filter(public=True) \
.exclude(owner=self.request.user)
class StackAdminListAPIView(generics.ListAPIView):
"""
    List every stack in the system. Admin users only.
"""
model = models.Stack
serializer_class = serializers.StackSerializer
permission_classes = (permissions.IsAdminUser,)
filter_class = filters.StackFilter
def get_queryset(self):
return self.model.objects.all()
class StackListAPIView(generics.ListCreateAPIView):
"""
    List the authenticated user's stacks and create new stacks from a blueprint.
"""
model = models.Stack
serializer_class = serializers.StackSerializer
parser_classes = (parsers.JSONParser,)
filter_class = filters.StackFilter
ALLOWED_FIELDS = ('blueprint', 'title', 'description', 'properties',
'max_retries', 'namespace', 'auto_launch',
'auto_provision', 'parallel', 'public',
'simulate_launch_failures', 'simulate_zombies',
'simulate_ssh_failures', 'failure_percent',)
def get_queryset(self):
return self.request.user.stacks.all()
# TODO: Code complexity issues are ignored for now
def create(self, request, *args, **kwargs): # NOQA
"""
Overriding create method to build roles and metadata objects for this
Stack as well as generating the salt-cloud map that will be used to
launch machines
"""
logger.debug(request.DATA)
# make sure the user has a public key or they won't be able to SSH
# later
if not request.user.settings.public_key:
raise BadRequest('You have not added a public key to your user '
'profile and will not be able to SSH in to any '
'machines. Please update your user profile '
'before continuing.')
# Validate data
errors = {}
for k in request.DATA:
if k not in self.ALLOWED_FIELDS:
errors.setdefault('unknown_fields', []) \
.append('{0} is an unknown field.'.format(k))
if errors:
raise BadRequest(errors)
# REQUIRED PARAMS
blueprint_id = request.DATA.pop('blueprint', '')
title = request.DATA.get('title', '')
description = request.DATA.get('description', '')
# OPTIONAL PARAMS
properties = request.DATA.get('properties', {})
max_retries = request.DATA.get('max_retries', 2)
# UNDOCUMENTED PARAMS
# Skips launching if set to False
launch_stack = request.DATA.get('auto_launch', True)
provision_stack = request.DATA.get('auto_provision', True)
# Launches in parallel mode if set to True
parallel = request.DATA.get('parallel', True)
# See stacks.tasks::launch_hosts for information on these params
simulate_launch_failures = request.DATA.get('simulate_launch_failures',
False)
simulate_ssh_failures = request.DATA.get('simulate_ssh_failures',
False)
simulate_zombies = request.DATA.get('simulate_zombies', False)
failure_percent = request.DATA.get('failure_percent', 0.3)
# check for required blueprint
if not blueprint_id:
errors.setdefault('blueprint', []) \
.append('This field is required.')
else:
try:
blueprint = Blueprint.objects.get(pk=blueprint_id,
owner=request.user)
except Blueprint.DoesNotExist:
errors.setdefault('blueprint', []).append(
'Blueprint with id {0} does not exist.'.format(
blueprint_id))
except ValueError:
errors.setdefault('blueprint', []).append(
'This field must be an ID of an existing blueprint.')
if errors:
raise BadRequest(errors)
# Generate the title and/or description if not provided by user
if not title and not description:
extra_description = ' (Title and description'
elif not title:
extra_description = ' (Title'
elif not description:
extra_description = ' (Description'
else:
extra_description = ''
if extra_description:
extra_description += ' auto generated from Blueprint {0})' \
.format(blueprint.pk)
if not title:
request.DATA['title'] = '{0} ({1})'.format(
blueprint.title,
datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
)
if not description:
description = blueprint.description
request.DATA['description'] = description + '{0}' \
.format(extra_description)
# check for duplicates
if models.Stack.objects.filter(owner=self.request.user,
title=title).count():
errors.setdefault('title', []).append(
'A Stack with this title already exists in your account.'
)
if not isinstance(properties, dict):
errors.setdefault('properties', []).append(
'This field must be a JSON object.'
)
else:
# user properties are not allowed to provide a __stackdio__ key
if '__stackdio__' in properties:
errors.setdefault('properties', []).append(
'The __stackdio__ key is reserved for system use.'
)
# check for hostname collisions if namespace is provided
namespace = request.DATA.get('namespace')
hostdefs = blueprint.host_definitions.all()
hostnames = models.get_hostnames_from_hostdefs(
hostdefs,
username=request.user.username,
namespace=namespace)
if namespace:
# If the namespace was not provided, then there is no chance
# of collision within the database
# query for existing host names
# Leave this in so that we catch errors faster if they are local,
# Only hit up salt cloud if there are no duplicates locally
hosts = models.Host.objects.filter(hostname__in=hostnames)
if hosts.count():
errors.setdefault('duplicate_hostnames', []).extend(
[h.hostname for h in hosts]
)
if errors:
raise BadRequest(errors)
salt_cloud = salt.cloud.CloudClient(
join(settings.STACKDIO_CONFIG.salt_config_root, 'cloud'))
query = salt_cloud.query()
# Since a blueprint can have multiple providers
providers = set()
for bhd in hostdefs:
providers.add(bhd.cloud_profile.cloud_provider)
# Check to find duplicates
dups = []
for provider in providers:
provider_type = provider.provider_type.type_name
for instance, details in query.get(provider.slug, {}) \
.get(provider_type, {}).items():
if instance in hostnames:
if details['state'] not in ('shutting-down', 'terminated'):
dups.append(instance)
if dups:
errors.setdefault('duplicate_hostnames', dups)
if errors:
raise BadRequest(errors)
# create the stack and related objects
try:
logger.debug(request.DATA)
stack = models.Stack.objects.create_stack(request.user,
blueprint,
**request.DATA)
except Exception, e:
logger.exception(e)
raise BadRequest(str(e))
if launch_stack:
workflow = workflows.LaunchWorkflow(stack)
workflow.opts.provision = provision_stack
workflow.opts.parallel = parallel
workflow.opts.max_retries = max_retries
workflow.opts.simulate_launch_failures = simulate_launch_failures
workflow.opts.simulate_ssh_failures = simulate_ssh_failures
workflow.opts.simulate_zombies = simulate_zombies
workflow.opts.failure_percent = failure_percent
workflow.execute()
stack.set_status('queued', models.Stack.PENDING,
'Stack has been submitted to launch queue.')
# return serialized stack object
serializer = serializers.StackSerializer(stack, context={
'request': request,
})
return Response(serializer.data)
class StackDetailAPIView(PublicStackMixin,
generics.RetrieveUpdateDestroyAPIView):
model = models.Stack
serializer_class = serializers.StackSerializer
parser_classes = (parsers.JSONParser,)
def destroy(self, request, *args, **kwargs):
"""
Overriding the delete method to make sure the stack
is taken offline before being deleted
"""
# Update the status
stack = self.get_object()
if stack.status not in models.Stack.SAFE_STATES:
raise BadRequest('You may not delete this stack in its '
'current state. Please wait until it is finished '
'with the current action.')
msg = 'Stack will be removed upon successful termination ' \
'of all machines'
stack.set_status(models.Stack.DESTROYING,
models.Stack.DESTROYING, msg)
parallel = request.DATA.get('parallel', True)
# Execute the workflow
workflow = workflows.DestroyStackWorkflow(stack)
workflow.opts.parallel = parallel
workflow.execute()
# Return the stack while its deleting
serializer = self.get_serializer(stack)
return Response(serializer.data)
class StackPropertiesAPIView(PublicStackMixin, generics.RetrieveUpdateAPIView):
model = models.Stack
serializer_class = serializers.StackPropertiesSerializer
parser_classes = (parsers.JSONParser,)
def update(self, request, *args, **kwargs):
stack = self.get_object()
if not isinstance(request.DATA, dict):
raise BadRequest('Data must be JSON object of properties.')
if not request.DATA:
raise BadRequest('No properties were given.')
# update the stack properties
stack.properties = request.DATA
# Re-generate the pillar file too
stack._generate_pillar_file()
return Response(stack.properties)
class StackHistoryAPIView(PublicStackMixin, generics.ListAPIView):
model = models.StackHistory
serializer_class = serializers.StackHistorySerializer
def get_queryset(self):
stack = self.get_object()
return stack.history.all()
class StackActionAPIView(generics.SingleObjectAPIView):
model = models.Stack
serializer_class = serializers.StackSerializer
permission_classes = (permissions.IsAuthenticated,
AdminOrOwnerPermission,)
def get_object(self, queryset=None):
obj = get_object_or_404(models.Stack, id=self.kwargs.get('pk'))
self.check_object_permissions(self.request, obj)
return obj
def get(self, request, *args, **kwargs):
stack = self.get_object()
driver_hosts_map = stack.get_driver_hosts_map()
available_actions = set()
for driver, hosts in driver_hosts_map.iteritems():
available_actions.update(driver.get_available_actions())
return Response({
'available_actions': sorted(available_actions),
})
# TODO: Code complexity issues are ignored for now
def post(self, request, *args, **kwargs): # NOQA
"""
POST request allows RPC-like actions to be called to interact
with the stack. Request contains JSON with an `action` parameter
and optional `args` depending on the action being executed.
Valid actions: stop, start, restart, terminate, provision,
orchestrate
"""
stack = self.get_object()
if stack.status not in models.Stack.SAFE_STATES:
raise BadRequest('You may not perform an action while the '
'stack is in its current state.')
driver_hosts_map = stack.get_driver_hosts_map()
total_host_count = len(stack.get_hosts().exclude(instance_id=''))
action = request.DATA.get('action', None)
args = request.DATA.get('args', [])
if not action:
raise BadRequest('action is a required parameter.')
# check the individual provider for available actions
for driver, hosts in driver_hosts_map.iteritems():
available_actions = driver.get_available_actions()
if action not in available_actions:
raise BadRequest('At least one of the hosts in this stack '
'does not support the requested action.')
# All actions other than launch require hosts to be available
if action != BaseCloudProvider.ACTION_LAUNCH and total_host_count == 0:
            raise BadRequest('The
import pygame
import numpy as np
import math
import gym
import torch
import random
import featureExtractor
import os
from nn import Neural_net as NN
random.seed(4)
_screen_height = 100
_screen_width = 100
_stripobsx = 0
_stripobsy = 20
_stripgoalx = 70
_stripgoaly = 20
_stripagentx = 80
_stripagenty = 10
_max_agent_speed = 5
weights = [1,1,1,1]
class Obstacle:
goals = None
def __init__(self,id,xpos = None , ypos = None , xvel = None , yvel = None, radius = None):
self.id = id
if xpos==None:
self.x = np.random.randint(_stripobsx,_screen_width-_stripobsx)
else:
self.x = xpos
if ypos==None:
self.y = np.random.randint(_stripobsy,_screen_height-_stripobsy)
else:
self.y = ypos
if radius==None:
self.rad = 20
else:
self.rad = radius
self.curr_goal = None #this is a tuple (x,y)
if xvel == None:
self.vel_x = 0
else:
self.vel_x = xvel
if yvel == None:
self.vel_y = 0
else:
self.vel_y = yvel
self.goal_change_counter = None
self.curr_counter = 0
class createBoardIRL():
#saved model contains the state dictionary and not the actual model
def __init__(self, saved_model = None, sensor_size = None, display = False ,number_of_actions = 4 , hidden_layers = [256 , 256] ,number_of_height = _screen_height , weights = weights , width = _screen_width , agent_radius = 10 , static_obstacles = 1 , dynamic_obstacles = 0 , static_obstacle_radius = 10 , dynamic_obstacle_radius = 0 , obstacle_speed_list = []):
pygame.init()
self.clock = pygame.time.Clock()
self.rewardWeights = weights #numpy array
self.display = display
self.gameExit = False
self.height = _screen_height
self.width = _screen_width
self.agent_radius = agent_radius
self.sensorArraysize = sensor_size
self.size_action_space = number_of_actions
self.max_agent_speed = _max_agent_speed
self.hidden_layers = hidden_layers
self.no_static_obstacles = static_obstacles
self.no_dynamic_obstacles = dynamic_obstacles
self.total_obs = self.no_static_obstacles+self.no_dynamic_obstacles
self.rad_static_obstacles = static_obstacle_radius
self.rad_dynamic_obstacles = dynamic_obstacle_radius
self.agent_action_flag = False
self.agent_action_keyboard = [False for i in range(4)]
self.agent_x = None
self.agent_y = None
self.agent_x_vel = 0
self.agent_y_vel = 0
self.goal_x = None
self.goal_y = None
self.old_dist = None
self.goal_threshold = 20
self.total_distance = None
self.obstacle_speed_list = obstacle_speed_list
self.static_obstacle_list = []
self.dynamic_obstacle_list = []
self.obstacle_list = []
self.sensor_readings = None #numpy array
if saved_model==None:
self.agentBrain = NN(self.sensorArraysize , self.hidden_layers,self.size_action_space)
self.agentBrain.cuda()
else:
self.agentBrain = NN(self.sensorArraysize , self.hidden_layers,self.size_action_space)
self.agentBrain.load_state_dict(torch.load(saved_model))
self.agentBrain.eval()
self.agentBrain.cuda()
#state : list
#state[0] - tuple containing agent current position
#state[1] - tuple containing goal position.
#state[2] - distance from goal
#state[3] - done?
#state[4 - end] - tuple obstacle position
self.state = None
self.reward = None
self.total_reward_accumulated = None
self.white = (255,255,255)
self.red = (255,0,0)
self.green = (0,255,0)
self.blue = (0,0,255)
self.black = (0,0,0)
self.caption = 'social navigation world'
print "initialization done."
def calculate_distance(self, tup1, tup2):
x_diff = tup1[0] - tup2[0]
y_diff = tup1[1] - tup2[1]
return math.sqrt(math.pow(x_diff, 2) + math.pow(y_diff, 2))
def check_overlap(self, tup1, tup2):
dist = self.calculate_distance(tup1, tup2)
if dist > (self.rad_static_obstacles + self.agent_radius):
return False
else:
return True
#this method is new for the IRL class
#this method takes in the state information generated by the environment at each step
#and converts it into sensory information format
#sensory information format is as follows:
#sensory dictionary :
# deviation - float ?? --remove as of now
# rel_vel - tuple (2)
# lidar_info - array [36]
# cur_vel - tuple(2)
# cur_pos - tuple(2)
# distance_from_goal - float
# distance_form_closest_obstacle - float
# action taken - tuple(2)
def state_to_sensorReadings(self):
sensor_readlist = []
#obstacle_info = self.state[3:]
closest_obs_dist , rel_vel = self.calculateDistanceFromClosestObstacle()
sensor_readlist.append(rel_vel[0])
sensor_readlist.append(rel_vel[1])
lidar_info = self.calculateLIDARInfo(100)
for i in range(lidar_info.size):
sensor_readlist.append(lidar_info[i])
sensor_readlist.append(self.agent_x_vel)
sensor_readlist.append(self.agent_y_vel)
sensor_readlist.append(self.agent_x)
sensor_readlist.append(self.agent_y)
sensor_readlist.append(self.state[2])
sensor_readlist.append(closest_obs_dist)
return np.asarray(sensor_readlist)
def calculateDistanceFromClosestObstacle(self):
min_dist = 10000
rel_x = None
rel_y = None
for i in range(len(self.obstacle_list)):
obs = self.obstacle_list[i]
dist = np.linalg.norm([(obs.x - self.agent_x),(obs.y - self.agent_y)])
if dist < min_dist:
min_dist = dist
rel_x = obs.vel_x - self.agent_x_vel
rel_y = obs.vel_y - self.agent_y_vel
return min_dist , (rel_x,rel_y)
#returns an array of size 36
def calculateLIDARInfo(self, lidar_range):
lidarHist = np.asarray([lidar_range for i in range(36)])
for i in range(len(self.obstacle_list)):
obs = self.obstacle_list[i]
dist = np.linalg.norm([(obs.x - self.agent_x),(obs.y - self.agent_y)])
bin = self.getBin(obs)
if dist < lidarHist[bin]:
lidarHist[bin] = dist
return lidarHist
def getBin(self, obs):
v1 = np.asarray([0,1])
v2 = np.asarray([obs.x - self.agent_x , obs.y - self.agent_y])
angle_diff = self.angle_between(v1,v2)
if v1[0]>v2[0]:
angle_diff = 2*math.pi - angle_diff
angle_diff = (angle_diff*360)/(2*math.pi)
angle_diff = angle_diff%360
return int(angle_diff//10)
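    # Worked example (illustrative): an obstacle directly to the agent's right
    # (v2 = [1, 0]) is 90 degrees from the reference vector v1 = [0, 1] and
    # falls into bin 9; one directly to the left (v2 = [-1, 0]) gets flipped to
    # 270 degrees and falls into bin 27, so the 36 bins cover 10 degrees each.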
def unit_vector(self,vector):
#print "vector"
return vector / np.linalg.norm(vector)
def angle_between(self,v1, v2):
#Returns the angle in radians between vectors 'v1' and 'v2'::
v1_u = self.unit_vector(v1)
v2_u = self.unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
def take_action_from_userMouse(self):
(a, b, c) = pygame.mouse.get_pressed()
x = 0.0001
y = 0.0001
print "heresa"
#while (True):
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
print "heare"
self.agent_action_flag = True
if event.type == pygame.MOUSEBUTTONUP:
self.agent_action_flag = False
if self.agent_action_flag:
(x, y) = pygame.mouse.get_pos()
x = x - self.agent_x
y = y - self.agent_y
if np.hypot(x, y) > _max_agent_speed:
normalizer = _max_agent_speed / (np.hypot(x, y))
# print x,y
else:
normalizer = 1
print (x * normalizer, y * normalizer)
return (x * normalizer, y * normalizer)
return (0, 0)
#a simplified version where there are just 4actions
def take_action_from_userKeyboard(self):
while (True):
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
key = pygame.key.get_pressed()
if key[pygame.K_UP]:
self.agent_action_keyboard[0]=True
if key[pygame.K_RIGHT]:
self.agent_action_keyboard[1]=True
if key[pygame.K_LEFT]:
self.agent_action_keyboard[3]=True
if key[pygame.K_DOWN]:
self.agent_action_keyboard[2]=True
if event.type == pygame.KEYUP:
if event.key == pygame.K_UP:
self.agent_action_keyboard[0]=False
if event.key == pygame.K_RIGHT:
self.agent_action_keyboard[1]=False
if event.key == pygame.K_LEFT:
self.agent_action_keyboard[3]=False
if event.key == pygame.K_DOWN:
self.agent_action_keyboard[2]=False
for i in range(len(self.agent_action_keyboard)):
if self.agent_action_keyboard[i]==True:
return i
return None
#returns the action that has the highest qvalue
def gen_action_from_agent(self):
qvalues = self.agentBrain(self.sensor_readings)
qvalues = qvalues.cpu().detach().numpy()
return np.argmax(qvalues)
#this method takes in the result from gen_action_from_agent()
#and converts into a tuple (x,y) where (x,y) is the amount the
#agent will move in the x and y direction respectively
def agent_action_to_WorldAction(self, action):
action_angle = 2*math.pi * action/self.size_action_space
action_vector = np.array([0, self.max_agent_speed])
rot_matrix = np.array([[math.cos(action_angle), -math.sin(action_angle)],[math.sin(action_angle), math.cos(action_angle)]])
action_taken = np.matmul(rot_matrix, action_vector)
return action_taken
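    # Worked example (illustrative): with size_action_space = 4 and
    # max_agent_speed = 5, action = 1 gives an angle of pi/2, and rotating the
    # base vector [0, 5] yields approximately [-5, 0]; action = 0 leaves it at
    # [0, 5]. Compare with the simplified 4-way mapping below.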
#a simplified version: only 4 ways to move
#action (int) can attain values from 0-3, and based on that
#the agent moves left right front or back
def agent_action_to_WorldActionSimplified(self,action):
if action==0: #move front
return np.asarray([0,-5])
if action==1: #move right
return np.asarray([5,0])
if action==2: #move down
return np.asarray([0,5])
if action==3: #move left
return np.asarray([-5,0])
def check_overlap_rect(self, tup1, tup2, rad):
if abs(tup1[0] - tup2[0]) < (rad + self.agent_radius) and abs(tup1[1] - tup2[1]) < (
rad / 2 + self.rad_static_obstacles):
return True
else:
return False
def generate_randomval(self, lower, upper):
i = np.random.ranf()
return lower + i * (upper - lower)
def reset(self):
self.agent_action_flag = False
if self.display:
self.gameDisplay = pygame.display.set_mode((self.width, self.height))
pygame.display.set_caption('social navigation world')
self.gameDisplay.fill(self.white)
self.obstacle_list = []
self.goal_x = self.generate_randomval(_screen_width - _stripgoalx, _screen_width)
self.goal_y = self.generate_randomval(_screen_height - _stripgoaly, _screen_height)
agent_x = self.generate_randomval(0, _stripagentx)
agent_y = self.generate_randomval(0, _stripagenty)
dist = math.sqrt(math.pow(self.goal_x - agent_x, 2) + math.pow(self.goal_y - agent_y, 2))
self.old_dist = dist
while (1):
if self.calculate_distance((self.goal_x, self.goal_y), (agent_x, agent_y)) < 50:
agent_x = self.generate_randomval(0, _stripagentx)
agent_y = self.generate_randomval(0, _stripagenty)
else:
break
self.state = [(agent_x, agent_y), (self.goal_x, self.goal_y), dist , 0] #0 - not done /1 - done
# print('PPPP',self.total_obstacles)
self.agent_x = agent_x
self.agent_y = agent_y
self.total_reward_accumulated = 0
# intialize the static obstacles
for i in range(self.no_static_obstacles):
while (1):
# print("hit")
temp_obs = Obstacle(i)
if (not self.check_overlap_rect((temp_obs.x, temp_obs.y), (agent_x, agent_y),
self.rad_static_obstacles) and not self.check_overlap_rect(
(temp_obs.x, temp_obs.y), (self.goal_x, self.goal_y), self.rad_static_obstacles)):
self.static_obstacle_list.append(temp_obs)
self.state.append((temp_obs.x, temp_obs.y))
self.obstacle_list.append(temp_obs)
break
# print(len(self.obstacle_list))
# initialize the dynamic obstacles
for i in range(self.no_dynamic_obstacles):
temp_obs = Obstacle(self.no_static_obstacles+i)
temp_obs.speed = self.obstacle_speed_list[i]
temp_obs.curr_goal = self.obstacle_goal_list[i]
temp_obs.goal_change_counter = self.goal_change_step
self.dynamic_obstacle_list.append(temp_obs)
self.state.append((temp_obs.x, temp_obs.y))
self.obstacle_list.append(temp_obs)
self.total_distance = self.calculate_distance(self.state[0], self.state[1])
#self.sensor_readings = self.state_to_sensorReadings()
self.sensor_readings = featureExtractor.featureExtractor(self.state,self.obstacle_list,(self.agent_x_vel,self.agent_y_vel),self.agent_radius)
#self.agentBrain = NN(self.state_to_sensorReadings().size , self.hidden_layers,self.size_action_space)
return np.array(self.state)
def renderObstacle(self, obs):
pygame.draw.circle(self.gameDisplay, self.red, (obs.x, obs.y), obs.rad)
def render(self): # renders the screen using the current state of the environment
self.gameDisplay.fill(self.white)
for obs in self.obstacle_list:
self.renderObstacle(obs)
# draw goal
pygame.draw.rect(self.gameDisplay, self.black, [self.goal_x, self.goal_y, 10, 10])
# draw agent
pygame.draw.circle(self.gameDisplay, self.black, (int(self.state[0][0]), int(self.state[0][1])), self.agent_radius)
pygame.display.update()
self.clock.tick(30)
# updates the position of the objects in the environment according to the dynamics
# action is a tuple (x,y), which gets added to the current
# position of the agent
def step(self, action):
self.old_dist = self.calculate_distance(self.state[0], self.state[1])
old_x = self.state[0][0]
old_y = self.state[0][1]
newx = self.state[0][0] + action[0]
        newy = self.state[0][1] +
"""
Simultaneous Machine Translation
"""
from .nmt_uni import *
from .reward import return_reward
import time
import sys
timer = time.time
# utility functions
def _seqs2words(caps, idict):
capsw = []
for cc in caps:
ww = []
for w in cc:
if w == 0:
break
ww.append(idict[w])
capsw.append(' '.join(ww))
return capsw
def _bpe2words(capsw):
capw = []
for cc in capsw:
capw += [cc.replace('@@ ', '')]
return capw
def _action2delay(src, actions):
delays = []
X = len(src)
for act in actions:
A = numpy.array(act, dtype='float32')
Y = numpy.sum(act)
S = numpy.sum(numpy.cumsum(1 - A) * A)
assert (X > 0) and (Y > 0), 'avoid NAN {}, {}'.format(X, Y)
tau = S / (Y * X)
delays.append([tau, X, Y, S])
return delays
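# Worked example (illustrative): for a 3-word source and one action sequence
# act = [0, 0, 1, 0, 1, 1] (0 = read a source word, 1 = emit a target word),
# Y = 3 target words are emitted after having read 2, 3 and 3 source words,
# so S = 2 + 3 + 3 = 8 and the normalized delay is tau = S / (Y * X) = 8 / 9.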
# padding for computing policy gradient
def _padding(arrays, shape, dtype='float32', return_mask=False, sidx=0):
B = numpy.zeros(shape, dtype=dtype)
if return_mask:
M = numpy.zeros((shape[0], shape[1]), dtype='float32')
for it, arr in enumerate(arrays):
arr = numpy.asarray(arr, dtype=dtype)
# print arr.shape
steps = arr.shape[0]
if arr.ndim < 2:
B[sidx: steps + sidx, it] = arr
else:
steps2 = arr.shape[1]
B[sidx: steps + sidx, it, : steps2] = arr
if return_mask:
M[sidx: steps + sidx, it] = 1.
if return_mask:
return B, M
return B
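# Usage sketch (illustrative): padding two action sequences of lengths 3 and 5
# into a (6, 2) batch with one leading pad row,
#   B, M = _padding([[1, 0, 1], [0, 0, 1, 0, 1]], shape=(6, 2),
#                   return_mask=True, sidx=1)
# fills B[1:4, 0] and B[1:6, 1] with the sequences and sets M to 1 at exactly
# those positions, leaving the rest zero.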
class PIPE(object):
def __init__(self, keys=None):
self.messages = OrderedDict()
self.hyp_messages = OrderedDict()
self.new_hyp_messages = OrderedDict()
for key in keys:
self.messages[key] = []
def reset(self):
for key in self.messages:
self.messages[key] = []
self.hyp_messages = OrderedDict()
self.new_hyp_messages = OrderedDict()
def clean_hyp(self):
self.hyp_messages = OrderedDict()
def clean_new_hyp(self):
self.new_hyp_messages = OrderedDict()
def init_hyp(self, key, live_k=None):
if live_k is not None:
self.hyp_messages[key] = [[] for _ in range(live_k)]
else:
self.hyp_messages[key] = []
def init_new_hyp(self, key, use_copy=False):
if use_copy:
self.new_hyp_messages[key] = copy.copy(self.hyp_messages[key])
else:
self.new_hyp_messages[key] = []
def append(self, key, new, idx=None, use_hyp=False):
if not use_hyp:
self.new_hyp_messages[key].append(new)
else:
self.new_hyp_messages[key].append(self.hyp_messages[key][idx] + [new])
def append_new(self, key, idx, hyper=True):
if hyper:
self.hyp_messages[key].append(self.new_hyp_messages[key][idx])
else:
# print self.messages['sample']
self.messages[key].append(self.new_hyp_messages[key][idx])
def add(self, key, new, idx):
self.new_hyp_messages[key][idx] += new
def asarray(self, key, replace=False):
if replace:
self.hyp_messages[key] = numpy.array(self.hyp_messages[key])
else:
return numpy.array(self.hyp_messages[key], dtype='float32')
def split(self):
truth = OrderedDict()
sample = OrderedDict()
for key in self.messages:
if key == 'source':
continue
truth[key] = []
sample[key] = []
if key == 'mask':
for idx in range(len(self.messages['source'])):
if self.messages['source'][idx] < 0:
sample[key].append(self.messages[key][:, idx])
else:
truth[key].append(self.messages[key][:, idx])
else:
for idx in range(len(self.messages['source'])):
if self.messages['source'][idx] < 0:
sample[key].append(self.messages[key][idx])
else:
truth[key].append(self.messages[key][idx])
self.messages = sample
return truth
# ==============================================================
# Simultaneous Translation in Batch-mode
# ==============================================================
def simultaneous_decoding(funcs,
_policy,
srcs, # source sentences
trgs, # target sentences
t_idict=None,
step=1, peek=1, sidx=3,
n_samples=10,
maxlen=120,
reward_config=None,
train=False,
use_forget=False,
forget_left=True,
use_newinput=False,
full_attention=False,
use_coverage=False,
on_groundtruth=0,
src_eos=True):
# unzip functions
f_sim_ctx = funcs[0]
f_sim_init = funcs[1]
f_sim_next = funcs[2]
f_cost = funcs[3]
if reward_config['finetune']:
ff_init = funcs[4]
ff_cost = funcs[5]
ff_update = funcs[6]
Statistcs = OrderedDict()
n_sentences = len(srcs)
n_out = 3 if use_forget else 2
max_steps = -1
_probs = numpy.zeros((n_out,))
_total = 0
# check
# if reward_config['greedy']:
# print 'use greedy policy'
# ============================================================================ #
# Generating Trajectories based on Current Policy
# ============================================================================ #
live_k = (n_samples + on_groundtruth) * n_sentences
live_all = live_k
# Critical! add the <eos>
srcs = [src + [0] for src in srcs]
src_max = max([len(src) for src in srcs])
if src_max < sidx:
sidx = src_max
x, ctx0, z0, secs0 = [], [], [], []
# data initialization
for id, (src, trg) in enumerate(zip(srcs, trgs)):
_x = numpy.array(src, dtype='int64')[:, None]
_, _ctx0, _ = f_sim_ctx(_x)
_z0 = f_sim_init(_ctx0[:sidx, :])
# _z0 = f_sim_init(_ctx0)
# print 'state', init
# print 'state', _z0
# print 'ctx0', _ctx0, _ctx0.shape
# print 'ctx_mean', m
x.append(_x[:, 0])
ctx0.append(_ctx0[:, 0, :])
z0.append(_z0.flatten())
secs0.append([id, len(src), 0]) # word id / source length / correctness
# pad the results
x, x_mask = _padding(x, (src_max, n_sentences), dtype='int64', return_mask=True)
ctx = _padding(ctx0, (src_max, n_sentences, ctx0[0].shape[-1]))
z0 = numpy.asarray(z0)
mask = numpy.asarray([1.] * sidx + [0.] * (src_max - sidx), dtype='float32')[:, None]
one = numpy.asarray([1.] * src_max, dtype='float32')[:, None]
# hidden states
hidden0 = _policy.init_hidden()
# if we have multiple samples for one input sentence
mask = numpy.tile(mask, [1, live_k])
z0 = numpy.tile(z0, [live_k // n_sentences, 1])
ctx = numpy.tile(ctx, [1, live_k // n_sentences, 1])
hidden0 = numpy.tile(hidden0, [live_k, 1])
secs = []
for _ in range(live_k // n_sentences):
secs += copy.deepcopy(secs0)
# ============================================================================ #
# PIPE for message passing
# =========================================================================== #
pipe = PIPE(['sample', 'score', 'action', 'obs', 'attentions',
'old_attend', 'coverage', 'source', 'forgotten', 'secs', 'cmask'])
# Build for the temporal results: hyp-message
for key in ['sample', 'obs', 'attentions', 'hidden', 'old_attend', 'cmask']:
pipe.init_hyp(key, live_k)
# special care
pipe.hyp_messages['source'] = [-1 for _ in range(n_samples)] + [0 for _ in range(on_groundtruth)]
pipe.hyp_messages['source'] = [si for si in pipe.hyp_messages['source'] for _ in range(n_sentences)]
pipe.hyp_messages['score'] = numpy.zeros(live_k).astype('float32')
pipe.hyp_messages['action'] = [[0] * sidx for _ in range(live_k)]
pipe.hyp_messages['forgotten'] = [[-1] * sidx for _ in range(live_k)]
pipe.hyp_messages['coverage'] = numpy.zeros((live_k, ctx.shape[0])).astype('float32')
pipe.hyp_messages['mask'] = mask
pipe.hyp_messages['ctx'] = ctx
pipe.hyp_messages['secs'] = secs
pipe.hyp_messages['states'] = z0
pipe.hyp_messages['heads'] = numpy.asarray([[sidx, 0, 0]] * live_k) # W C F
# these are inputs that needs to be updated
prev_w = -1 * numpy.ones((live_k,)).astype('int64')
prev_z = z0
prev_hid = hidden0
step = 0
# =======================================================================
# ROLLOUT: Iteration until all the samples over.
# Action space:
# 0: Read,
# 1: Commit,
# 2: Forget,
# =======================================================================
while live_k > 0:
step += 1
inps = [prev_w, ctx, mask, prev_z]
# print mask
next_p, _, next_z, next_o, next_a, cur_emb = f_sim_next(*inps)
if full_attention:
old_mask = numpy.tile(one, [1, live_k])
inps2 = inps
inps2[2] = old_mask
_, _, _, _, next_fa, _ = f_sim_next(*inps2)
# obtain the candidate and the accumulated score.
_cand = next_p.argmax(axis=-1) # live_k
_score = next_p[list(range(live_k)), _cand]
# new place-holders for temporal results: new-hyp-message
pipe.clean_new_hyp()
for key in ['sample', 'score', 'heads', 'attentions', 'old_attend', 'coverage', 'source',
'mask', 'ctx', 'secs', 'states', 'cmask']:
pipe.init_new_hyp(key, use_copy=True)
for key in ['action', 'forgotten', 'obs', 'hidden']:
pipe.init_new_hyp(key, use_copy=False)
cov = pipe.new_hyp_messages['coverage'] * pipe.new_hyp_messages['mask'].T \
+ next_a # clean that has been forgotten
# current maximum
cid = cov.argmax(axis=-1)
# Rollout the action.
_actions, _aprop, _hidden, _z = _policy.action(next_o, prev_hid) # input the current observation
# print _actions.shape
if reward_config['greedy']:
_actions = _aprop.argmax(-1)
# print _actions.shape
_total += _aprop.shape[0]
_probs += _aprop.sum(axis=0)
# check each candidate
for idx, wi in enumerate(_cand):
# collect the action
a = _actions[idx]
# ***** Evaluate the Action !!! *****
# for wait:
if reward_config.get('upper', False):
# a = 1 - pipe.hyp_messages['action'][idx][-1]
a = 0 # testing upper bound: only wait
if reward_config['greedy'] and (pipe.new_hyp_messages['heads'][idx, 0]
>= pipe.new_hyp_messages['secs'][idx][1]):
a = 1 # in greedy mode. must end.
if reward_config['greedy'] and (pipe.new_hyp_messages['heads'][idx, 2]
>= pipe.new_hyp_messages['heads'][idx, 0]):
a = 1 # in greedy mode. must end.
# must read the whole sentence
# if pipe.new_hyp_messages['heads'][idx, 0] < pipe.new_hyp_messages['secs'][idx][1]:
# if wi == 0: # end before read the last source words ---> wait!!
# a = 0
# message appending
pipe.append('obs', next_o[idx], idx=idx, use_hyp=True)
pipe.append('action', a, idx=idx, use_hyp=True) # collect action.
pipe.append('hidden', _hidden[idx])
# print pipe.hyp_messages['heads'][idx]
if a == 0:
# read-head move on one step
# print 'p', pipe.hyp_messages['heads'][idx, 0], pipe.hyp_messages['secs'][idx]
if pipe.new_hyp_messages['heads'][idx, 0] < pipe.new_hyp_messages['secs'][idx][1]:
pipe.new_hyp_messages['mask'][pipe.new_hyp_messages['heads'][idx, 0], idx] = 1
pipe.new_hyp_messages['heads'][idx, 0] += 1
pipe.append('forgotten', -1, idx=idx, use_hyp=True)
# if the first word is still waiting for decoding
# """
if numpy.sum(pipe.new_hyp_messages['action'][idx]) == 0:
temp_sidx = pipe.new_hyp_messages['heads'][idx, 0]
_ctx0 = ctx0[pipe.new_hyp_messages['secs'][idx][0]][:, None, :]
_z0 = f_sim_init(_ctx0[:temp_sidx]) # initializer
pipe.new_hyp_messages['states'][idx] = _z0
# """
# for commit:
elif a == 1:
# print mask
# update new_hyp_message
head_t = pipe.new_hyp_messages['source'][idx]
if head_t == -1: # use generated samples
pipe.add('sample', [wi], idx)
pipe.add('cmask', [mask], idx)
else:
pipe.add('sample', [trg[head_t] if head_t < len(trg) else 0], idx) # use ground-truth
pipe.new_hyp_messages['source'][idx] += 1
pipe.add('score', _score[idx], idx)
pipe.add('attentions', [next_a[idx]], idx)
pipe.append('forgotten', -1, idx=idx, use_hyp=True)
if full_attention:
pipe.add('old_attend', [next_fa[idx]], idx)
# *** special care
pipe.new_hyp_messages['states'][idx] = next_z[idx]
pipe.new_hyp_messages['heads'][idx, 1] += 1
pipe.new_hyp_messages['coverage'][idx] = cov[idx]
# for forget:
elif a == 2:
# move the forget head.
if forget_left:
_idx = pipe.new_hyp_messages['heads'][idx, 2]
if pipe.new_hyp_messages['heads'][idx, 2] < pipe.new_hyp_messages['heads'][idx, 0]:
| |
#!/usr/bin/python2.4
# -*- coding: iso-8859-15 -*-
import sys, os
import traceback
import re
from time import sleep
import getpass
from datetime import datetime
from klab.fs.fsio import read_file
# Database import functions
# Use oursql if available; it's more up to date
try:
import oursql as MySQLdb
import oursql.cursors as cursors
except ImportError:
import MySQLdb
import MySQLdb.cursors as cursors
DictCursor = cursors.DictCursor
SSDictCursor = cursors.SSDictCursor
StdCursor = cursors.Cursor
class DatabaseInterface(object):
def __init__(self, settings, isInnoDB=True, numTries=1, host=None, db=None, user=None, passwd=None, port=None,
unix_socket=None, passwdfile=None, use_utf=False, use_locking=True):
self.connection = None
self.StdCursor_connection = None
self.SSDictCursor_connection = None
self.queries_run = 0
self.procedures_run = 0
self.use_utf = use_utf
self.isInnoDB = isInnoDB
self.host = host or settings["SQLHost"]
self.db = db or settings["SQLDatabase"]
self.user = user or settings["SQLUser"]
self.passwd = passwd or settings.get("SQLPassword") or '' # allow for empty passwords e.g. for anonymous accounts with read-only access
self.port = port or settings["SQLPort"]
self.unix_socket = unix_socket or settings["SQLSocket"]
if use_locking == True or use_locking == False:
self.use_locking = use_locking
else:
self.use_locking = settings["SQLUseLocking"]
self.numTries = numTries
self.lastrowid = None
if (not self.passwd) and passwdfile:
if os.path.exists(passwdfile):
self.passwd = read_file(passwdfile).strip()
else:
self.passwd = getpass.getpass("Enter password to connect to MySQL database:")
self.locked = False
if self.use_locking:
self.lockstring = "LOCK TABLES %s" % ", ".join(["%s WRITE" % list(r.values())[0] for r in self.execute("SHOW TABLES")])
self.unlockstring = "UNLOCK TABLES"
else:
self.lockstring = ""
self.unlockstring = ""
# Store a list of the table names
self.TableNames = [list(r.values())[0] for r in self.execute("SHOW TABLES")]
# Store a hierarchy of objects corresponding to the table names and their field names
self.FieldNames = _FieldNames(None)
self.FlatFieldNames = _FieldNames(None)
tablenames = self.TableNames
for tbl in tablenames:
setattr(self.FieldNames, tbl, _FieldNames(tbl))
fieldDescriptions = self.execute("SHOW COLUMNS FROM `%s`" % tbl)
for field in fieldDescriptions:
fieldname = field["Field"]
setattr(getattr(self.FieldNames, tbl), fieldname, fieldname)
setattr(self.FlatFieldNames, fieldname, fieldname)
getattr(self.FieldNames, tbl).makeReadOnly()
self.FieldNames.makeReadOnly()
self.FlatFieldNames.makeReadOnly()
def __del__(self):
if self.connection and self.connection.open:
self.connection.close()
if self.StdCursor_connection and self.StdCursor_connection.open:
self.StdCursor_connection.close()
def close(self):
if self.connection and self.connection.open:
self.connection.close()
if self.StdCursor_connection and self.StdCursor_connection.open:
self.StdCursor_connection.close()
def checkIsClosed(self):
assert ((not (self.connection) or not (self.connection.open)) and (not (self.StdCursor_connection) or not (self.StdCursor_connection.open)))
def _get_connection(self, force = False):
if force or not (self.connection and self.connection.open):
if self.use_utf:
self.connection = MySQLdb.connect(host=self.host, db=self.db, user=self.user, passwd=self.passwd,
port=self.port, unix_socket=self.unix_socket, cursorclass=DictCursor,
charset='utf8', use_unicode=True)
else:
self.connection = MySQLdb.connect(host=self.host, db=self.db, user=self.user, passwd=self.passwd,
port=self.port, unix_socket=self.unix_socket, cursorclass=DictCursor)
def _get_StdCursor_connection(self):
if not (self.StdCursor_connection and self.StdCursor_connection.open):
if self.use_utf:
self.StdCursor_connection = MySQLdb.connect(host=self.host, db=self.db, user=self.user, passwd=self.passwd,
port=self.port, unix_socket=self.unix_socket, cursorclass=StdCursor,
charset='utf8', use_unicode=True)
else:
self.StdCursor_connection = MySQLdb.connect(host=self.host, db=self.db, user=self.user, passwd=self.passwd,
port=self.port, unix_socket=self.unix_socket, cursorclass=StdCursor)
def _get_SSDictCursor_connection(self):
if not (self.SSDictCursor_connection and self.SSDictCursor_connection.open):
if self.use_utf:
self.SSDictCursor_connection = MySQLdb.connect(host=self.host, db=self.db, user=self.user, passwd=self.passwd,
port=self.port, unix_socket=self.unix_socket, cursorclass=SSDictCursor,
charset='utf8', use_unicode=True)
else:
self.SSDictCursor_connection = MySQLdb.connect(host=self.host, db=self.db, user=self.user, passwd=self.passwd,
port=self.port, unix_socket=self.unix_socket, cursorclass=SSDictCursor)
def _close_connection(self):
self.close()
def iterate_query(self, query, arraysize=100000):
self._get_SSDictCursor_connection()
c = self.SSDictCursor_connection.cursor()
c.execute(query)
while True:
nextrows = c.fetchmany(arraysize)
if not nextrows:
break
for row in nextrows:
yield row
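# Illustrative usage sketch (the table name and process() are hypothetical,
# and `settings` is assumed to be a configured settings dict): stream a large
# result set without loading every row into memory at once.
#   db = DatabaseInterface(settings)
#   for row in db.iterate_query("SELECT * FROM SomeLargeTable"):
#       process(row)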
def getLastRowID(self):
return self.lastrowid
def locked_execute(self, sql, parameters=None, cursorClass=DictCursor, quiet=False):
'''We are lock-happy here but SQL performance is not currently an issue daemon-side.'''
return self.execute(sql, parameters=parameters, quiet=quiet, locked=True, do_commit=True)
def transaction_insert_dict_auto_inc(self, transaction_cursor, tblname, d, unique_id_fields = [], fields = None, check_existing = False, id_field = 'ID'):
'''A transaction wrapper for inserting dicts into fields with an autoincrementing ID. Insert the record and return the associated ID (long).'''
sql, params, record_exists = self.create_insert_dict_string(tblname, d, PKfields=unique_id_fields, fields=fields, check_existing = check_existing)
if not record_exists:
transaction_cursor.execute(sql, params)
id = transaction_cursor.lastrowid
if id == None:
id = self.get_unique_record('SELECT * FROM {0} WHERE {1}'.format(tblname, ' AND '.join([f + '=%s' for f in unique_id_fields])), parameters = tuple([d[f] for f in unique_id_fields]))[id_field]
assert(id)
return id
def get_unique_record(self, sql, parameters = None, quiet = False, locked = False):
'''I use this pattern a lot. Return the single record corresponding to the query.'''
results = self.execute_select(sql, parameters = parameters, quiet = quiet, locked = locked)
assert(len(results) == 1)
return results[0]
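# Illustrative usage sketch (assuming `db` is a DatabaseInterface instance;
# table and column names are hypothetical):
#   record = db.get_unique_record("SELECT * FROM Users WHERE ID=%s", parameters=(42,))
#   rows = db.execute_select("SELECT * FROM Users WHERE Status=%s", parameters=("active",))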
def execute_select(self, sql, parameters = None, quiet = False, locked = False):
if locked:
print(('LOCKED execute_select {0} {1}'.format(sql, parameters)))
return self.execute(sql, parameters=parameters, quiet=quiet, locked=locked, do_commit=False)
def execute_select_StdCursor(self, sql, parameters=None, quiet=False, locked=False):
return self.execute_StdCursor(sql, parameters=parameters, quiet=quiet, locked=locked, do_commit=False)
def execute_select_SSDictCursor(self, sql, parameters=None, quiet=False, locked=False):
return self.execute_SSDictCursor(sql, parameters=parameters, quiet=quiet, locked=locked, do_commit=False)
def execute_SSDictCursor(self, sql, parameters=None, quiet=False, locked=False, do_commit=True):
"""Execute SQL query. This uses DictCursor by default."""
self.queries_run += 1
i = 0
errcode = 0
caughte = None
cursor = None
cursorClass = SSDictCursor
if sql.find(";") != -1 or sql.find("\\G") != -1:
# Catches some injections
raise Exception("The SQL command '%s' contains a semi-colon or \\G. This is a potential SQL injection." % sql)
while i < self.numTries:
i += 1
try:
self._get_SSDictCursor_connection()
cursor = self.SSDictCursor_connection.cursor()
if locked:
if self.lockstring:
print((sql, parameters))
print('LOCKING')
cursor.execute(self.lockstring)
self.locked = True
if parameters:
errcode = cursor.execute(sql, parameters)
else:
errcode = cursor.execute(sql)
self.lastrowid = int(cursor.lastrowid)
if do_commit and self.isInnoDB:
self.SSDictCursor_connection.commit()
results = cursor.fetchall()
if locked:
if self.unlockstring:
print('UNLOCKING')
cursor.execute(self.unlockstring)
self.locked = False
cursor.close()
return results
except MySQLdb.OperationalError as e:
if cursor:
if self.locked:
if self.unlockstring:
print('UNLOCKING')
cursor.execute(self.unlockstring)
self.locked = False
cursor.close()
caughte = str(e)
errcode = e.args[0]
continue
except Exception as e:
if cursor:
if self.locked:
if self.unlockstring:
print('UNLOCKING')
cursor.execute(self.unlockstring)
self.locked = False
cursor.close()
caughte = str(e)
traceback.print_exc()
break
sleep(0.2)
if not quiet:
sys.stderr.write(
"\nSQL execution error in query %s at %s:" % (sql, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
sys.stderr.write("\nErrorcode/Error: %d - '%s'.\n" % (errcode, str(caughte)))
sys.stderr.flush()
raise MySQLdb.OperationalError(caughte)
def execute_StdCursor(self, sql, parameters=None, quiet=False, locked=False, do_commit=True):
"""Execute SQL query. This uses DictCursor by default."""
self.queries_run += 1
i = 0
errcode = 0
caughte = None
cursor = None
cursorClass = StdCursor
if sql.find(";") != -1 or sql.find("\\G") != -1:
# Catches some injections
raise Exception("The SQL command '%s' contains a semi-colon or \\G. This is a potential SQL injection." % sql)
while i < self.numTries:
i += 1
try:
self._get_StdCursor_connection()
cursor = self.StdCursor_connection.cursor()
if locked:
if self.lockstring:
print((sql, parameters))
print('LOCKING')
cursor.execute(self.lockstring)
self.locked = True
if parameters:
errcode = cursor.execute(sql, parameters)
else:
errcode = cursor.execute(sql)
self.lastrowid = int(cursor.lastrowid)
if do_commit and self.isInnoDB:
self.StdCursor_connection.commit()
results = cursor.fetchall()
if locked:
if self.unlockstring:
print('UNLOCKING')
cursor.execute(self.unlockstring)
self.locked = False
cursor.close()
return results
except MySQLdb.OperationalError as e:
if cursor:
if self.locked:
if self.unlockstring:
print('UNLOCKING')
cursor.execute(self.unlockstring)
self.locked = False
cursor.close()
caughte = str(e)
errcode = e.args[0]
continue
except Exception as e:
if cursor:
if self.locked:
if self.unlockstring:
print('UNLOCKING')
cursor.execute(self.unlockstring)
self.locked = False
cursor.close()
caughte = str(e)
traceback.print_exc()
break
sleep(0.2)
if not quiet:
sys.stderr.write(
"\nSQL execution error in query %s at %s:" % (sql, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
sys.stderr.write("\nErrorcode/Error: %d - '%s'.\n" % (errcode, str(caughte)))
sys.stderr.flush()
raise MySQLdb.OperationalError(caughte)
def list_stored_procedures(self):
return [r['Name'] for r in self.execute("SHOW PROCEDURE STATUS")]
def run_transaction(self, command_list, do_commit=True):
'''This can be used to stage multiple commands and roll back the transaction if an error occurs. This is useful
if you want to remove multiple records in multiple tables for one entity but do not want the deletion to occur
if the entity is tied to table not specified in the list of commands. Performing this as a transaction avoids
the situation where the records are partially removed. If do_commit is false, the entire transaction is cancelled.'''
pass
# I decided against creating this for now.
# It may be more useful to create a stored procedure like in e.g. _create_protein_deletion_stored_procedure
# in the DDGadmin project and then use callproc
for c in command_list:
if c.find(";") != -1 or c.find("\\G") != -1:
# Catches *some* injections
raise Exception("The SQL command '%s' contains a semi-colon or \\G. This is a potential SQL injection." % c)
if do_commit:
sql = "START TRANSACTION;\n%s;\nCOMMIT" % "\n".join(command_list)
else:
sql = "START TRANSACTION;\n%s;" % "\n".join(command_list)
#print(sql)
return
def execute(self, sql, parameters=None, quiet=False, locked=False, do_commit=True, allow_unsafe_query=False):
"""Execute SQL query. This uses DictCursor by default."""
if do_commit:
pass#print('s')
self.queries_run += 1
i = 0
errcode = 0
caughte = None
cursor = None
cursorClass = DictCursor
if not(allow_unsafe_query) and (sql.find(";") != -1 or sql.find("\\G") != -1):
# Catches some injections
raise Exception("The SQL command '%s' contains a semi-colon or \\G. This is a potential SQL injection." % sql)
while i < self.numTries:
| |
io, filename=name, content_type='application/octet-stream')
return form, contains_attachments
def add_file_to_message_data(message_data, file, contains_content, is_edit):
"""
Adds files to the message data creating a form data if applicable.
Parameters
----------
message_data : `dict` of (`str`, `Any`) items
The message's payload to send.
file : `None`, `dict` of (`file-name`, `io`) items, `list` of (`file-name`, `io`) elements, \
tuple (`file-name`, `io`), `io`
The files to send.
contains_content : `bool`
Whether the message already contains any content.
is_edit : `bool`
Whether we are creating edit file form.
Returns
-------
message_data : `None`, `dict`, ``Formdata``
Returns a ``Formdata`` if the message contains attachments, a `dict` if it contains any content, and `None` if
not.
Raises
------
ValueError
If more than `10` files are registered to be sent.
"""
if (file is (... if is_edit else None)):
if not contains_content:
message_data = None
else:
form, contains_attachments = create_file_form(message_data, file)
if (form is None):
if (not contains_content) and (not contains_attachments):
message_data = None
else:
message_data = form
return message_data
else:
def create_file_form(data, file):
"""
Creates a `multipart/form-data` form from the message's data and from the file data. If there are no files to
send, returns `None` to tell the caller that nothing was added to the overall data.
Parameters
----------
data : `dict` of `Any`
The data created by the ``.message_create`` method.
file : `dict` of (`file-name`, `io`) items, `list` of (`file-name`, `io`) elements, tuple (`file-name`, `io`),
`io`
The files to send.
Returns
-------
form : `None`, `Formdata`
Returns a `Formdata` of the files and from the message's data. If there are no files to send, returns `None`
instead.
Raises
------
ValueError
If more than `10` files are registered to be sent.
Notes
-----
Accepted `io` types with check order are:
- ``BodyPartReader``
- `bytes`, `bytearray`, `memoryview`
- `str`
- `BytesIO`
- `StringIO`
- `TextIOBase`
- `BufferedReader`, `BufferedRandom`
- `IOBase`
- ``AsyncIO``
- `async-iterable`
Raises `TypeError` at the case of invalid `io` type.
There are two predefined data types specialized to send files:
- ``ReuBytesIO``
- ``ReuAsyncIO``
If a buffer is sent, it is closed when the request is done. So if the request fails, we would not be able to
resend the file, unless we have a data type which, instead of really closing on `.close()`, just seeks back to
`0` (or later if needed). These data types implement a `.real_close()` method, but they do `real_close` on
`__exit__` as well.
"""
form = Formdata()
form.add_field('payload_json', to_json(data))
files = []
# checking structure
# case 0 none
if file is None:
pass
# case 1 dict like
elif hasattr(type(file), 'items'):
files.extend(file.items())
# case 2 tuple => file, filename pair
elif isinstance(file, tuple):
files.append(file)
# case 3 list like
elif isinstance(file, (list, deque)):
for element in file:
if type(element) is tuple:
name, io = element
else:
io = element
name = ''
if not name:
#guessing name
name = getattr(io, 'name', '')
if name:
_, name = split_path(name)
else:
name = str(random_id())
files.append((name, io),)
#case 4 file itself
else:
name = getattr(file, 'name', '')
#guessing name
if name:
_, name = split_path(name)
else:
name = str(random_id())
files.append((name, file),)
# checking the amount of files
# case 1 one file
if len(files) == 1:
name, io = files[0]
form.add_field('file', io, filename=name, content_type='application/octet-stream')
# case 2, no files -> return None, we should use the already existing data
elif len(files) == 0:
return None
# case 3 maximum 10 files
elif len(files) < 11:
for index, (name, io) in enumerate(files):
form.add_field(f'file{index}s', io, filename=name, content_type='application/octet-stream')
# case 4 more than 10 files
else:
raise ValueError(
f'Can send up to 10 files at once, got {len(files)!r}; {reprlib.repr(files)}.'
)
return form
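# Illustrative examples of the accepted `file` argument shapes (names and
# buffer objects are hypothetical):
#   file = ('report.txt', io_buffer)                 # single (file-name, io) pair
#   file = {'a.png': buffer_a, 'b.png': buffer_b}    # dict of file-name -> io
#   file = [buffer_a, ('b.png', buffer_b)]           # list of ios and/or (name, io) pairs
#   file = io_buffer                                 # bare io; name guessed from `.name`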
def add_file_to_message_data(message_data, file, contains_content, is_edit):
"""
Adds files to the message data creating a form data if applicable.
Parameters
----------
message_data : `dict` of (`str`, `Any`) items
The message's payload to send.
file : `None`, `dict` of (`file-name`, `io`) items, `list` of (`file-name`, `io`) elements, \
tuple (`file-name`, `io`), `io`
The files to send.
contains_content : `bool`
Whether the message already contains any content.
is_edit : `bool`
Whether we are creating edit file form.
Returns
-------
message_data : `None`, `dict`, ``Formdata``
Returns a ``Formdata`` if the message contains attachments, a `dict` if it contains any content, and `None` if
not.
Raises
------
ValueError
If more than `10` files are registered to be sent.
"""
if (file is (... if is_edit else None)):
if not contains_content:
message_data = None
else:
form = create_file_form(message_data, file)
if (form is None) and (not contains_content):
message_data = None
else:
message_data = form
return message_data
def validate_content_and_embed(content, embed, is_edit):
"""
Validates the given content and embed fields of a message creation or edition.
Parameters
----------
content : `str`, ``EmbedBase``, `Any`, Optional
The content of the message.
If given as ``EmbedBase``, then the message's embeds will be edited with it.
embed : `None`, ``EmbedBase``, `list` of ``EmbedBase``, Optional (Keyword only)
The new embedded content of the message. By passing it as `None`, you can remove the old.
> If `embed` and `content` parameters are both given as ``EmbedBase``, then `AssertionError` is
raised.
is_edit : `bool`
Whether the processed `content` and `embed` fields are for message edition. In this case, passing `None` will
remove them.
Returns
-------
content : `Ellipsis`, `None`, `str`
The message's content.
embed : `Ellipsis`, `None`, ``EmbedBase``, (`list`, `tuple`) of ``EmbedBase``
The messages embeds.
Raises
------
TypeError
If `embed` was not given neither as ``EmbedBase`` nor as `list`, `tuple` of ``EmbedBase``-s.
AssertionError
- If `embed` contains a non ``EmbedBase`` element.
- If both `content` and `embed` fields are embeds.
"""
# Embed check order:
# 1.: None
# 2.: Ellipsis -> None || Ellipsis
# 3.: Embed : -> embed || [embed]
# 4.: list of Embed -> embed[0] || embed[:10] or None
# 5.: raise
if embed is None:
pass
elif embed is ...:
if not is_edit:
embed = None
elif isinstance(embed, EmbedBase):
embed = [embed]
elif isinstance(embed, (list, tuple)):
if embed:
if __debug__:
for embed_element in embed:
if not isinstance(embed_element, EmbedBase):
raise AssertionError(
f'`embed` can contains `{EmbedBase.__name__}` elements, got '
f'{embed_element.__class__.__name__}; {embed_element!r}; embed={embed!r}.'
)
embed = embed[:10]
else:
embed = None
else:
raise TypeError(
f'`embed` can be `{EmbedBase.__name__}`, (`list`, `tuple`) of {EmbedBase.__name__}, got '
f'{embed.__class__.__name__}; {embed!r}.'
)
# Content check order:
# 1.: None -> None || ''
# 2.: Ellipsis -> None || Ellipsis
# 3.: str
# 4.: Embed -> embed = content || [content]
# 5.: list of Embed -> embed = content[0] || content[:10]
# 6.: object -> str(content)
if content is None:
if is_edit:
content = ''
elif content is ...:
if not is_edit:
content = None
elif isinstance(content, str):
pass
elif isinstance(content, EmbedBase):
if __debug__:
if (embed is not (... if is_edit else None)):
raise AssertionError(
f'Multiple parameters were given as embed, got content={content!r}, embed={embed!r}.'
)
embed = [content]
if is_edit:
content = ...
else:
content = None
else:
# Check for list of embeds as well.
if isinstance(content, (list, tuple)):
if content:
for element in content:
if isinstance(element, EmbedBase):
continue
is_list_of_embeds = False
break
else:
is_list_of_embeds = True
else:
is_list_of_embeds = False
else:
is_list_of_embeds = False
if is_list_of_embeds:
if __debug__:
if (embed is not (... if is_edit else None)):
raise AssertionError(
f'Multiple parameters were given as embed, got content={content!r}, embed={embed!r}.'
)
embed = content[:10]
if is_edit:
content = ...
else:
content = None
else:
content = str(content)
return content, embed
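# Illustrative behaviour of the normalisation above (assuming `Embed` is an
# ``EmbedBase`` subclass; the values are examples only):
#   validate_content_and_embed('hi', None, False)       -> ('hi', None)
#   validate_content_and_embed(Embed('t'), None, False) -> (None, [Embed('t')])
#   validate_content_and_embed('hi', ..., True)         -> ('hi', ...)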
def get_channel_id(channel, type_checker):
"""
Gets the channel's identifier from the given channel or from its identifier.
def find_pets_by_status_with_http_info(self, status, **kwargs):  # noqa: E501
"""Finds Pets by status # noqa: E501
Multiple status values can be provided with comma separated strings # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.find_pets_by_status_with_http_info(status, async_req=True)
>>> result = thread.get()
:param status: Status values that need to be considered for filter (required)
:type status: list[str]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(list[Pet], status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'status'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method find_pets_by_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'status' is set
if self.api_client.client_side_validation and ('status' not in local_var_params or # noqa: E501
local_var_params['status'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `status` when calling `find_pets_by_status`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'status' in local_var_params and local_var_params['status'] is not None: # noqa: E501
query_params.append(('status', local_var_params['status'])) # noqa: E501
collection_formats['status'] = 'csv' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/xml', 'application/json']) # noqa: E501
# Authentication setting
auth_settings = ['petstore_auth'] # noqa: E501
response_types_map = {
200: "list[Pet]",
400: None,
}
return self.api_client.call_api(
'/pet/findByStatus', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def find_pets_by_tags(self, tags, **kwargs): # noqa: E501
"""Finds Pets by tags # noqa: E501
Multiple tags can be provided with comma separated strings. Use tag1, tag2, tag3 for testing. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.find_pets_by_tags(tags, async_req=True)
>>> result = thread.get()
:param tags: Tags to filter by (required)
:type tags: list[str]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: list[Pet]
"""
kwargs['_return_http_data_only'] = True
return self.find_pets_by_tags_with_http_info(tags, **kwargs) # noqa: E501
def find_pets_by_tags_with_http_info(self, tags, **kwargs): # noqa: E501
"""Finds Pets by tags # noqa: E501
Multiple tags can be provided with comma separated strings. Use tag1, tag2, tag3 for testing. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.find_pets_by_tags_with_http_info(tags, async_req=True)
>>> result = thread.get()
:param tags: Tags to filter by (required)
:type tags: list[str]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(list[Pet], status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'tags'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method find_pets_by_tags" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'tags' is set
if self.api_client.client_side_validation and ('tags' not in local_var_params or # noqa: E501
local_var_params['tags'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `tags` when calling `find_pets_by_tags`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'tags' in local_var_params and local_var_params['tags'] is not None: # noqa: E501
query_params.append(('tags', local_var_params['tags'])) # noqa: E501
collection_formats['tags'] = 'csv' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/xml', 'application/json']) # noqa: E501
# Authentication setting
auth_settings = ['petstore_auth'] # noqa: E501
response_types_map = {
200: "list[Pet]",
400: None,
}
return self.api_client.call_api(
'/pet/findByTags', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def get_pet_by_id(self, pet_id, **kwargs): # noqa: E501
"""Find pet by ID # noqa: E501
Returns a single pet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_pet_by_id(pet_id, async_req=True)
>>> result = thread.get()
:param pet_id: ID of pet to return (required)
:type pet_id: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: Pet
"""
kwargs['_return_http_data_only'] = True
return self.get_pet_by_id_with_http_info(pet_id, **kwargs) # noqa: E501
def get_pet_by_id_with_http_info(self, pet_id, **kwargs): # noqa: E501
"""Find pet by ID # noqa: E501
Returns a single pet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_pet_by_id_with_http_info(pet_id, async_req=True)
>>> result = thread.get()
:param pet_id: ID of pet to return (required)
:type pet_id: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(Pet, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'pet_id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_pet_by_id" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'pet_id' is set
| |
# src/utils/hdf5_getters.py
"""
<NAME> (2010) Columbia University
<EMAIL>
This code contains a set of getters functions to access the fields
from an HDF5 song file (regular file with one song or
aggregate / summary file with many songs)
This is part of the Million Song Dataset project from
LabROSA (Columbia University) and The Echo Nest.
Copyright 2010, <NAME>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import tables
def open_h5_file_read(h5filename):
"""
Open an existing H5 in read mode.
Same function as in hdf5_utils, here so we avoid one import
"""
return tables.open_file(h5filename, mode='r')
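# Illustrative usage sketch (the filename is hypothetical):
#   h5 = open_h5_file_read('TRAXLZU12903D05F94.h5')
#   print(get_artist_name(h5), get_title(h5), get_tempo(h5))
#   h5.close()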
def get_num_songs(h5):
"""
Return the number of songs contained in this h5 file, i.e. the number of rows
for all basic information like name, artist, ...
"""
return h5.root.metadata.songs.nrows
def get_artist_familiarity(h5, songidx=0):
"""
Get artist familiarity from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.artist_familiarity[songidx]
def get_artist_hotttnesss(h5, songidx=0):
"""
Get artist hotttnesss from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.artist_hotttnesss[songidx]
def get_artist_id(h5, songidx=0):
"""
Get artist id from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.artist_id[songidx]
def get_artist_mbid(h5, songidx=0):
"""
Get artist musicbrainz id from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.artist_mbid[songidx]
def get_artist_playmeid(h5, songidx=0):
"""
Get artist playme id from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.artist_playmeid[songidx]
def get_artist_7digitalid(h5, songidx=0):
"""
Get artist 7digital id from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.artist_7digitalid[songidx]
def get_artist_latitude(h5, songidx=0):
"""
Get artist latitude from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.artist_latitude[songidx]
def get_artist_longitude(h5, songidx=0):
"""
Get artist longitude from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.artist_longitude[songidx]
def get_artist_location(h5, songidx=0):
"""
Get artist location from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.artist_location[songidx]
def get_artist_name(h5, songidx=0):
"""
Get artist name from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.artist_name[songidx]
def get_release(h5, songidx=0):
"""
Get release from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.release[songidx]
def get_release_7digitalid(h5, songidx=0):
"""
Get release 7digital id from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.release_7digitalid[songidx]
def get_song_id(h5, songidx=0):
"""
Get song id from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.song_id[songidx]
def get_song_hotttnesss(h5, songidx=0):
"""
Get song hotttnesss from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.song_hotttnesss[songidx]
def get_title(h5, songidx=0):
"""
Get title from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.title[songidx]
def get_track_7digitalid(h5, songidx=0):
"""
Get track 7digital id from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.track_7digitalid[songidx]
def get_similar_artists(h5, songidx=0):
"""
Get similar artists array. Takes care of the proper indexing if we are in aggregate
file. By default, return the array for the first song in the h5 file.
To get a regular numpy ndarray, cast the result to: numpy.array( )
"""
if h5.root.metadata.songs.nrows == songidx + 1:
return h5.root.metadata.similar_artists[h5.root.metadata.songs.cols.idx_similar_artists[songidx]:]
return h5.root.metadata.similar_artists[h5.root.metadata.songs.cols.idx_similar_artists[songidx]:
h5.root.metadata.songs.cols.idx_similar_artists[songidx + 1]]
def get_artist_terms(h5, songidx=0):
"""
Get artist terms array. Takes care of the proper indexing if we are in aggregate
file. By default, return the array for the first song in the h5 file.
To get a regular numpy ndarray, cast the result to: numpy.array( )
"""
if h5.root.metadata.songs.nrows == songidx + 1:
return h5.root.metadata.artist_terms[h5.root.metadata.songs.cols.idx_artist_terms[songidx]:]
return h5.root.metadata.artist_terms[h5.root.metadata.songs.cols.idx_artist_terms[songidx]:
h5.root.metadata.songs.cols.idx_artist_terms[songidx + 1]]
def get_artist_terms_freq(h5, songidx=0):
"""
Get artist terms array frequencies. Takes care of the proper indexing if we are in aggregate
file. By default, return the array for the first song in the h5 file.
To get a regular numpy ndarray, cast the result to: numpy.array( )
"""
if h5.root.metadata.songs.nrows == songidx + 1:
return h5.root.metadata.artist_terms_freq[h5.root.metadata.songs.cols.idx_artist_terms[songidx]:]
return h5.root.metadata.artist_terms_freq[h5.root.metadata.songs.cols.idx_artist_terms[songidx]:
h5.root.metadata.songs.cols.idx_artist_terms[songidx + 1]]
def get_artist_terms_weight(h5, songidx=0):
"""
Get artist terms array weights. Takes care of the proper indexing if we are in aggregate
file. By default, return the array for the first song in the h5 file.
To get a regular numpy ndarray, cast the result to: numpy.array( )
"""
if h5.root.metadata.songs.nrows == songidx + 1:
return h5.root.metadata.artist_terms_weight[h5.root.metadata.songs.cols.idx_artist_terms[songidx]:]
return h5.root.metadata.artist_terms_weight[h5.root.metadata.songs.cols.idx_artist_terms[songidx]:
h5.root.metadata.songs.cols.idx_artist_terms[songidx + 1]]
def get_analysis_sample_rate(h5, songidx=0):
"""
Get analysis sample rate from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.analysis_sample_rate[songidx]
def get_audio_md5(h5, songidx=0):
"""
Get audio MD5 from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.audio_md5[songidx]
def get_danceability(h5, songidx=0):
"""
Get danceability from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.danceability[songidx]
def get_duration(h5, songidx=0):
"""
Get duration from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.duration[songidx]
def get_end_of_fade_in(h5, songidx=0):
"""
Get end of fade in from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.end_of_fade_in[songidx]
def get_energy(h5, songidx=0):
"""
Get energy from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.energy[songidx]
def get_key(h5, songidx=0):
"""
Get key from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.key[songidx]
def get_key_confidence(h5, songidx=0):
"""
Get key confidence from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.key_confidence[songidx]
def get_loudness(h5, songidx=0):
"""
Get loudness from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.loudness[songidx]
def get_mode(h5, songidx=0):
"""
Get mode from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.mode[songidx]
def get_mode_confidence(h5, songidx=0):
"""
Get mode confidence from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.mode_confidence[songidx]
def get_start_of_fade_out(h5, songidx=0):
"""
Get start of fade out from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.start_of_fade_out[songidx]
def get_tempo(h5, songidx=0):
"""
Get tempo from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.tempo[songidx]
def get_time_signature(h5, songidx=0):
"""
Get time signature from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.time_signature[songidx]
def get_time_signature_confidence(h5, songidx=0):
"""
Get time signature confidence from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.time_signature_confidence[songidx]
def get_track_id(h5, songidx=0):
"""
Get track id from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.track_id[songidx]
def get_segments_start(h5, songidx=0):
"""
Get segments start array. Takes care of the proper indexing if we are in aggregate
file. By default, return the array for the first song in the h5 file.
To get a regular numpy ndarray, cast the result to: numpy.array( )
"""
if h5.root.analysis.songs.nrows == songidx + 1:
return h5.root.analysis.segments_start[h5.root.analysis.songs.cols.idx_segments_start[songidx]:]
return h5.root.analysis.segments_start[h5.root.analysis.songs.cols.idx_segments_start[songidx]:
h5.root.analysis.songs.cols.idx_segments_start[songidx + 1]]
def get_segments_confidence(h5, songidx=0):
"""
Get segments confidence array. Takes care of the proper indexing if we are in aggregate
file. By default, return the array for the first song in the h5 file.
To get a regular numpy ndarray, cast the result to: numpy.array( )
"""
if h5.root.analysis.songs.nrows == songidx + 1:
return h5.root.analysis.segments_confidence[h5.root.analysis.songs.cols.idx_segments_confidence[songidx]:]
return h5.root.analysis.segments_confidence[h5.root.analysis.songs.cols.idx_segments_confidence[songidx]:
h5.root.analysis.songs.cols.idx_segments_confidence[songidx + 1]]
def get_segments_pitches(h5, songidx=0):
"""
Get segments pitches array. Takes care of the proper indexing if we are in aggregate
file. By default, return the array for the first song in the h5 file.
To get a regular numpy ndarray, cast the result to: numpy.array( )
"""
if h5.root.analysis.songs.nrows == songidx + 1:
return h5.root.analysis.segments_pitches[h5.root.analysis.songs.cols.idx_segments_pitches[songidx]:, :]
return h5.root.analysis.segments_pitches[h5.root.analysis.songs.cols.idx_segments_pitches[songidx]:
h5.root.analysis.songs.cols.idx_segments_pitches[songidx + 1], :]
def get_segments_timbre(h5, songidx=0):
"""
Get segments timbre array. Takes | |
# RathmoreChaos/intficpy - intficpy/things.py
import copy
from .actor import Actor, Player
from .thing_base import Thing
from .daemons import Daemon
class Holder(Thing):
"""
An item that can hold or contain another item.
This is the base class for Surfaces, Container,
UnderSpaces, etc.
"""
contains_preposition = "in"
def revealContents(self):
self.revealed = True
for item in self.topLevelContentsList:
item.makeKnown(self.game.me)
def playerAboutToAddItem(self, item, preposition, event="turn", **kwargs):
"""
The preparations we make when the player is about to try to add an item
to this item. Performs any implicit actions needed to add the item.
Returns True if the item addition is allowed, False otherwise.
:param item: the item to attempt to add
:type item: Thing
:param preposition: the contains preposition the player wants to add the item with
(in/on/etc.)
:type preposition: str
"""
if preposition == self.contains_preposition:
if not item.playerAboutToMoveTo(self, event=event, **kwargs):
return False
return True
return super().playerAboutToAddItem(item, preposition, event=event, **kwargs)
def playerAddsItem(
self, item, preposition, event="turn", success_msg=None, **kwargs
):
"""
The result of a player trying to add an item to this item's contents.
If the player is attempting to add an item "in" this item (or, in the case of
customized contains preposition, if the the preposition matches that of this
item), we move the item to our `contains`.
Returns True on success, else False.
:param item: the item to attempt to add
:type item: Thing
:param preposition: the contains preposition the player wants to add the item with
(in/on/etc.)
:type preposition: str
"""
if success_msg:
self.game.addTextToEvent(event, success_msg)
return item.playerMovesTo(self, event=event, **kwargs)
class Openable(Thing):
"""
An item that can be opened.
Inheriting from this class means that instances can be made openable
"""
is_open_desc__true = "It is open. "
is_open_desc__false = "It is closed. "
IS_OPEN_DESC_KEY = "is_open_desc"
@property
def is_open_desc(self):
"""
Describes the objects open/closed state in words
"""
return self.is_open_desc__true if self.is_open else self.is_open_desc__false
@property
def is_locked_desc(self):
if not self.lock_obj:
return ""
return self.lock_obj.is_locked_desc
def playerAboutToOpen(self, event="turn"):
"""
Events run when the player is about to try to open this item.
Returns True to allow opening, and False to deny.
:param event: the event to print messages to
:type event: str
"""
if self.lock_obj and self.lock_obj.is_locked:
self.game.addTextToEvent(event, f"{self.capNameArticle(True)} is locked. ")
return False
return True
def playerOpens(self, event="turn"):
"""
Events run when the player tries to open this item. Returns True on success,
and False on failure.
:param event: the event to print messages to
:type event: str
"""
self.game.addTextToEvent(event, f"You open {self.lowNameArticle(True)}. ")
self.makeOpen()
return True
def makeOpen(self):
self.is_open = True
def makeClosed(self):
self.is_open = False
class Unremarkable(Thing):
"""
An item that does not need to be explicitly brought to the Player's attention
most of the time.
"""
invItem = False
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.known_ix = None
@property
def default_desc(self):
"""
By default, do not describe in the room description.
"""
return ""
class Surface(Holder):
"""Class for Things that can have other Things placed on them """
contains_preposition = "on"
contains_on = True
contains_preposition_inverse = "off"
can_contain_sitting_player = False
can_contain_standing_player = False
can_contain_lying_player = False
desc_reveal = True
class Container(Holder, Openable):
"""Things that can contain other Things """
holds_liquid = False
size = 50
desc_reveal = True
xdesc_reveal = True
contains_preposition = "in"
contains_in = True
contains_preposition_inverse = "out"
closed_msg = "The {self.verbose_name} is closed."
does_not_fit_msg = (
"The {item.verbose_name} is too big to fit inside the {self.verbose_name}. "
)
@property
def contains_desc(self):
"""
Describe the contents of an item
"""
if self.has_lid and not self.is_open:
return ""
return super().contains_desc
def tryOpeningLid(self, event="turn"):
"""
If the lid is currently closed, try to open it, returning False on failure.
Otherwise return True
:param event: the event name to print to
:type event: str
"""
if self.has_lid and not self.is_open:
self.game.addTextToEvent(
event, f"(First trying to open {self.lowNameArticle(True)})"
)
if not self.playerAboutToOpen(event=event) or not self.playerOpens(
event=event
):
return False
return True
def playerAboutToAddItem(self, item, preposition, event="turn", **kwargs):
"""
The preparations we make when the player is about to try to add an item
to this item. Performs any implicit actions needed to add the item.
Returns True if the item addition is allowed, False otherwise.
:param item: the item to attempt to add
:type item: Thing
:param preposition: the contains preposition the player wants to add the item with
(in/on/etc.)
:type preposition: str
"""
if not self.tryOpeningLid(event=event):
return False
if item.size > self.size:
self.game.addTextToEvent(
event, self.does_not_fit_msg.format(self=self, item=item)
)
return False
existing_liquid = self.containsLiquid()
if existing_liquid and not getattr(item, "liquid_type", None):
self.game.addTextToEvent(
event,
f"{self.capNameArticle(True)} is already full of {existing_liquid.liquid_type}. ",
)
return False
return super().playerAboutToAddItem(item, preposition, event=event, **kwargs)
def playerAboutToRemoveItem(self, item, event="turn", **kwargs):
"""
Actions carried out when the player is about to try and remove an item contained
by this item.
:param event: the event name to print to
:type event: str
"""
if not self.tryOpeningLid(event=event):
return False
return True
def playerDumpsItems(self, event="turn", **kwargs):
"""
The result of a player trying to dump the items.
Returns True on success, else False.
:param event: the event name to print to
:type event: str
"""
if self.has_lid and not self.is_open:
self.game.addTextToEvent(event, self.closed_msg.format(self=self))
return False
return super().playerDumpsItems(event=event, **kwargs)
def playerAboutToLookIn(self, event="turn", **kwargs):
"""
Actions carried out when the player is about to try and look inside this item.
:param event: the event name to print to
:type event: str
"""
if not self.tryOpeningLid(event=event):
return False
return True
def playerLooksIn(self, event="turn", **kwargs):
"""
The result of a player trying to look in this item.
Returns True on success, else False.
:param event: the event name to print to
:type event: str
"""
self.game.addTextToEvent(event, self.contains_desc)
self.revealContents()
return True
def hideContents(self):
self.desc_reveal = True
self.xdesc_reveal = True
def setLock(self, lock_obj):
if not isinstance(lock_obj, Lock):
raise ValueError("Cannot set lock_obj for {self.verbose_name}: not a Lock")
if not self.has_lid:
raise ValueError(f"Cannot set lock_obj for {self.verbose_name}: no lid")
if lock_obj.parent_obj:
raise ValueError(
f"Cannot set lock_obj for {self.verbose_name}: lock_obj.parent already set"
)
self.lock_obj = lock_obj
lock_obj.parent_obj = self
if self.location:
self.location.addThing(lock_obj)
lock_obj.setAdjectives(lock_obj.adjectives + self.adjectives + [self.name])
self.state_descriptors.append(lock_obj.IS_LOCKED_DESC_KEY)
def giveLid(self):
self.has_lid = True
self.is_open = False
self.revealed = False
if not self.IS_OPEN_DESC_KEY in self.state_descriptors:
self.state_descriptors.append(self.IS_OPEN_DESC_KEY)
def makeOpen(self):
super().makeOpen()
self.revealContents()
def makeClosed(self):
super().makeClosed()
self.hideContents()
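# (Added illustration) A hedged sketch of the typical lid lifecycle for a
# Container; the surrounding `game` object and the (game, name) constructor
# signature are assumptions inferred from LightSource.__init__ below, not
# verified against the library's public API:
#
#   chest = Container(game, "chest")
#   chest.giveLid()          # chest now starts closed and hides its contents
#   chest.tryOpeningLid()    # implicitly attempts to open before any access
#   chest.makeClosed()       # closing the lid hides the contents again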
# NOTE: May not be necessary as a distinct class. Consider just using the wearable property.
class Clothing(Thing):
"""Class for Things that can be worn """
# all clothing is wearable
wearable = True
def playerAboutToWear(self, event="turn", **kwargs):
"""
Actions carried out when the player is about to wear this item
:param event: the event name to print to
:type event: str
"""
return True
def playerWears(self, event="turn", **kwargs):
"""
The result of a player trying to wear the item.
Returns True on success, else False.
:param event: the event name to print to
:type event: str
"""
self.game.addTextToEvent(event, f"You wear {self.lowNameArticle(True)}. ")
self.game.me.makeWearing(self)
return True
def playerAboutToDoff(self, event="turn", **kwargs):
"""
Actions carried out when the player is about to doff this item
:param event: the event name to print to
:type event: str
"""
return True
def playerDoffs(self, event="turn", **kwargs):
"""
The result of a player trying to doff the item.
Returns True on success, else False.
:param event: the event name to print to
:type event: str
"""
self.game.addTextToEvent(event, f"You take off {self.lowNameArticle(True)}. ")
self.game.me.removeWearing(self)
self.moveTo(self.game.me)
return True
class LightSource(Thing):
"""Class for Things that are light sources """
IS_LIT_DESC_KEY = "is_lit_desc"
is_lit = False
player_can_light = True
player_can_extinguish = True
consumable = False
turns_left = 20
room_lit_msg = None
light_msg = None
already_lit_msg = None
extinguish_msg = None
already_extinguished_msg = None
cannot_light_msg = None
cannot_extinguish_msg = None
cannot_light_expired_msg = None
extinguishing_expired_msg = None
expiry_warning = None
lit_desc = "It is currently lit. "
not_lit_desc = "It is currently not lit. "
expired_desc = "It is burnt out. "
def __init__(self, game, name):
"""
Set basic properties for the LightSource instance
Takes argument name, a single noun (string)
"""
super().__init__(game, name)
# LightSource properties
self.room_lit_msg = "The " + self.name + " lights | |
# notebooks/Chap08PineSolns.py
# ---
# jupyter:
# jupytext:
# cell_metadata_filter: all
# formats: ipynb,py:percent
# notebook_metadata_filter: all,-language_info,-toc,-latex_envs
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.3.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Chapter 8 (Pine): Curve Fitting Solutions
# %% [markdown]
# ${\large\bf 1.}$ We linearize the equation $V(t)=V_0e^{-\Gamma t}$ by taking the logarithm: $\ln V = \ln V_0 - \Gamma t$. Comparing with the equation for a straight line $Y = A + BX$, we see that
# $$
# \begin{align}
# Y &= \ln V \;,& X &= t \\
# A &= \ln V_0\;,& B &= -\Gamma
# \end{align}
# $$
# %% [markdown]
# $\bf{(a)}$ & $\bf{(c)}$ There are two parts to this problem: (1) writing the fitting function with $\chi^2$ weighting and (2) transforming the data to linear form so that it can be fit to an exponential.
#
# The first part is done with the function ``LineFitWt(x, y, sig)``. There is also an ancillary function ``redchisq(x, y, dy, slope, yint)`` that calculates the reduced chi-squared $\chi_r^2$ for a particular set of data & fitting parameters.
#
# The second part involves transforming the data and its uncertainties. This is done following the procedure described in *Introduction to Python for Science (by Pine)* in $\S 8.1.1$.
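# %% [markdown]
# *(Added note.)* The uncertainty transformation used below follows from ordinary
# error propagation: with $Y = \ln V$,
# $$\delta Y = \left|\frac{\partial Y}{\partial V}\right|\,\delta V = \frac{\delta V}{V}\;,$$
# which is what the code assigns to ``dY``.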
# %%
import numpy as np
import matplotlib.pyplot as plt
def LineFitWt(x, y, sig):
"""
Returns slope and y-intercept of weighted linear fit to
(x,y) data set.
Inputs: x and y data array and uncertainty array (unc)
for y data set.
Outputs: slope and y-intercept of best fit to data and
uncertainties of slope & y-intercept.
"""
sig2 = sig ** 2
norm = (1.0 / sig2).sum()
xhat = (x / sig2).sum() / norm
yhat = (y / sig2).sum() / norm
slope = ((x - xhat) * y / sig2).sum() / ((x - xhat) * x / sig2).sum()
yint = yhat - slope * xhat
sig2_slope = 1.0 / ((x - xhat) * x / sig2).sum()
sig2_yint = sig2_slope * (x * x / sig2).sum() / norm
return slope, yint, np.sqrt(sig2_slope), np.sqrt(sig2_yint)
def redchisq(x, y, dy, slope, yint):
chisq = (((y - yint - slope * x) / dy) ** 2).sum()
return chisq / float(x.size - 2)
# Read data from data file
t, V, dV = np.loadtxt("data/RLcircuit.txt", skiprows=2, unpack=True)
########## Code to tranform & fit data starts here ##########
# Transform data and parameters from ln V = ln V0 - Gamma t
# to linear form: Y = A + B*X, where Y = ln V, X = t, dY = dV/V
X = t # transform t data for fitting (not needed as X=t)
Y = np.log(V)  # transform V data for fitting
dY = dV / V # transform uncertainties for fitting
# Fit transformed data X, Y, dY to obtain fitting parameters
# B & A. Also returns uncertainties dA & dB in B & A
B, A, dB, dA = LineFitWt(X, Y, dY)
# Return reduced chi-squared
redchisqr = redchisq(X, Y, dY, B, A)
# Determine fitting parameters for original exponential function
# V = V0 exp(-Gamma t) ...
V0 = np.exp(A)
Gamma = -B
# ... and their uncertainties
dV0 = V0 * dA
dGamma = dB
###### Code to plot transformed data and fit starts here ######
# Create line corresponding to fit using fitting parameters
# Only two points are needed to specify a straight line
Xext = 0.05 * (X.max() - X.min())
Xfit = np.array([X.min() - Xext, X.max() + Xext]) # smallest & largest X points
Yfit = B * Xfit + A # generates Y from X data &
# fitting function
plt.errorbar(X, Y, dY, fmt="b^")
plt.plot(Xfit, Yfit, "c-", zorder=-1)
plt.title(r"$\mathrm{Fit\ to:}\ \ln V = \ln V_0-\Gamma t$ or $Y = A + BX$")
plt.xlabel("time (ns)")
plt.ylabel("ln voltage (volts)")
plt.xlim(-50, 550)
plt.text(210, 1.5, u"A = ln V0 = {0:0.4f} \xb1 {1:0.4f}".format(A, dA))
plt.text(210, 1.1, u"B = -Gamma = {0:0.4f} \xb1 {1:0.4f} /ns".format(B, dB))
plt.text(210, 0.7, "$\chi_r^2$ = {0:0.3f}".format(redchisqr))
plt.text(210, 0.3, u"V0 = {0:0.2f} \xb1 {1:0.2f} V".format(V0, dV0))
plt.text(210, -0.1, u"Gamma = {0:0.4f} \xb1 {1:0.4f} /ns".format(Gamma, dGamma))
plt.savefig("RLcircuit.pdf")
plt.show()
# %% [markdown]
# $\bf{(b)}$ The value of $\chi_r^2$ returned by the fitting routine is $0.885$, which is near 1, so it seems that the error bars are about right and an exponential is a good model for the data.
# %% [markdown]
# ${\bf (d)}$ Starting from $\Gamma = R/L$ and assuming negligible uncertainty in $R$, we have
# $$\begin{align}
# L &= \frac{R}{\Gamma} = \frac{10^4~\Omega}{(0.0121~\text{ns}^{-1})(10^9~\text{ns/s})} = 8.24 \times 10^{-4}~\text{henry}
# = 824~\mu\text{H}\\
# \delta L &= \left|\frac{\partial L}{\partial \Gamma}\right|\delta\Gamma = \frac{R}{\Gamma^2}\delta\Gamma
# = L \frac{\delta\Gamma}{\Gamma} = 1.1 \times 10^{-5}~\text{henry} = 11~\mu\text{H}
# \end{align}$$
# Here are the calculations:
# %%
R = 10.0e3
Gamma *= 1.0e9 # convert Gamma from 1/ns to 1/s
L = R / Gamma
print("L = {0:0.2e} henry".format(L))
dGamma *= 1.0e9 # convert dGamma from 1/ns to 1/s
dL = L * (dGamma / Gamma)
print("dL = {0:0.1e} henry".format(dL))
# %% [markdown]
# ${\large\bf 2.}$ Here we want to use a linear fitting routine ($Y = A + BX$) to fit a power law model
# $$m = Kn^p\;,$$
# where $K$ and $p$ are fitting parameters. We transform the equation by taking the logarithm of both sides, which gives
# $$\ln m = \ln K + p\ln n\;.$$
# Thus, identifying the transformed variables as
# $$y=\ln m\;,\quad x=\ln n\;,$$
# and the $y$-intercept and slope are given by $A=\ln K$ and $B=p$, respectively.
#
# The uncertainties in $y$ are related to those in $m$ by
# $$\delta y = \left| \frac{\partial y}{\partial m} \right|\delta m = \frac{\delta m}{m}$$
#
# The uncertainties in the fitting parameters follow from $K=e^A$ and $p=B$:
# $$ \delta K = e^A \delta A\;,\quad \delta p = \delta B\;.$$
#
# These transformations are implemented in the code below. We use the same fitting routine used in Problem 1 above.
# %%
import numpy as np
import matplotlib.pyplot as plt
def LineFitWt(x, y, sig):
"""
Returns slope and y-intercept of weighted linear fit to
(x,y) data set.
Inputs: x and y data array and uncertainty array (unc)
for y data set.
Outputs: slope and y-intercept of best fit to data and
uncertainties of slope & y-intercept.
"""
sig2 = sig ** 2
norm = (1.0 / sig2).sum()
xhat = (x / sig2).sum() / norm
yhat = (y / sig2).sum() / norm
slope = ((x - xhat) * y / sig2).sum() / ((x - xhat) * x / sig2).sum()
yint = yhat - slope * xhat
sig2_slope = 1.0 / ((x - xhat) * x / sig2).sum()
sig2_yint = sig2_slope * (x * x / sig2).sum() / norm
return slope, yint, np.sqrt(sig2_slope), np.sqrt(sig2_yint)
def redchisq(x, y, dy, slope, yint):
chisq = (((y - yint - slope * x) / dy) ** 2).sum()
return chisq / float(x.size - 2)
# Read data from data file
n, m, dm = np.loadtxt("data/Mass.txt", skiprows=4, unpack=True)
########## Code to tranform & fit data starts here ##########
# Transform data and parameters to linear form: Y = A + B*X
X = np.log(n)  # transform n data for fitting
Y = np.log(m)  # transform m data for fitting
dY = dm / m  # transform uncertainties in m for fitting
# Fit transformed data X, Y, dY to obtain fitting parameters
# B & A. Also returns uncertainties dA & dB in B & A
B, A, dB, dA = LineFitWt(X, Y, dY)
# Return reduced chi-squared
redchisqr = redchisq(X, Y, dY, B, A)
# Determine fitting parameters for original power-law function
# m = K n^p ...
p = B
K = np.exp(A)
# ... and their uncertainties
dp = dB
dK = np.exp(A) * dA
###### Code to plot transformed data and fit starts here ######
# Create line corresponding to fit using fitting parameters
# Only two points are needed to specify a straight line
Xext = 0.05 * (X.max() - X.min())
Xfit = np.array([X.min() - Xext, X.max() + Xext])
Yfit = B * Xfit + A # generates Y from X data &
# fitting function
plt.errorbar(X, Y, dY, fmt="gs")
plt.plot(Xfit, Yfit, "k-", zorder=-1)
plt.title(r"Fit to $\ln m=\ln K + p\, \ln n$ or $Y=A+BX$")
plt.xlabel(r"$\ln m$", fontsize=16)
plt.ylabel(r"$\ln n$", fontsize=16)
plt.text(10, 7.6, u"A = ln K = {0:0.1f} \xb1 {1:0.1f}".format(A, dA))
plt.text(10, 7.3, u"B = p = {0:0.2f} \xb1 {1:0.2f}".format(B, dB))
plt.text(10, 7.0, u"K = {0:0.1e} \xb1 {1:0.1e}".format(K, dK))
plt.text(10, 6.7, "$\chi_r^2$ = {0:0.3f}".format(redchisqr))
plt.savefig("Mass.pdf")
plt.show()
# %% [markdown]
# ${\large\bf 3.}$ (a)
# %%
import
fac.UpdateInstruction(cc_9, True) ]))
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_11, [
fac.UpdateInstruction(cc_9, False) ]))
st_9._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_10, True) ]))
transitions.append(fac.Transition(st_11, [
fac.UpdateInstruction(cc_10, False) ]))
st_10._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_11, [
fac.UpdateInstruction(cc_11, True) ]))
st_11._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
Compound_._Automaton = _BuildAutomaton_16()
def _BuildAutomaton_17 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_17
del _BuildAutomaton_17
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'avm.xsd', 337, 6))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'avm.xsd', 338, 6))
counters.add(cc_1)
cc_2 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'avm.xsd', 339, 6))
counters.add(cc_2)
cc_3 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'avm.xsd', 340, 6))
counters.add(cc_3)
cc_4 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'avm.xsd', 341, 6))
counters.add(cc_4)
cc_5 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'avm.xsd', 342, 6))
counters.add(cc_5)
cc_6 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'avm.xsd', 343, 6))
counters.add(cc_6)
cc_7 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'avm.xsd', 344, 6))
counters.add(cc_7)
cc_8 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'avm.xsd', 345, 6))
counters.add(cc_8)
cc_9 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'avm.xsd', 346, 6))
counters.add(cc_9)
cc_10 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'avm.xsd', 347, 6))
counters.add(cc_10)
cc_11 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'avm.xsd', 348, 6))
counters.add(cc_11)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(DesignSpaceContainer_._UseForTag(pyxb.namespace.ExpandedName(None, u'Container')), pyxb.utils.utility.Location(u'avm.xsd', 337, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(DesignSpaceContainer_._UseForTag(pyxb.namespace.ExpandedName(None, u'Property')), pyxb.utils.utility.Location(u'avm.xsd', 338, 6))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_2, False))
symbol = pyxb.binding.content.ElementUse(DesignSpaceContainer_._UseForTag(pyxb.namespace.ExpandedName(None, u'ComponentInstance')), pyxb.utils.utility.Location(u'avm.xsd', 339, 6))
st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_3, False))
symbol = pyxb.binding.content.ElementUse(DesignSpaceContainer_._UseForTag(pyxb.namespace.ExpandedName(None, u'Port')), pyxb.utils.utility.Location(u'avm.xsd', 340, 6))
st_3 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_3)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_4, False))
symbol = pyxb.binding.content.ElementUse(DesignSpaceContainer_._UseForTag(pyxb.namespace.ExpandedName(None, u'Connector')), pyxb.utils.utility.Location(u'avm.xsd', 341, 6))
st_4 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_4)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_5, False))
symbol = pyxb.binding.content.ElementUse(DesignSpaceContainer_._UseForTag(pyxb.namespace.ExpandedName(None, u'JoinData')), pyxb.utils.utility.Location(u'avm.xsd', 342, 6))
st_5 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_5)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_6, False))
symbol = pyxb.binding.content.ElementUse(DesignSpaceContainer_._UseForTag(pyxb.namespace.ExpandedName(None, u'Formula')), pyxb.utils.utility.Location(u'avm.xsd', 343, 6))
st_6 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_6)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_7, False))
symbol = pyxb.binding.content.ElementUse(DesignSpaceContainer_._UseForTag(pyxb.namespace.ExpandedName(None, u'ContainerFeature')), pyxb.utils.utility.Location(u'avm.xsd', 344, 6))
st_7 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_7)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_8, False))
symbol = pyxb.binding.content.ElementUse(DesignSpaceContainer_._UseForTag(pyxb.namespace.ExpandedName(None, u'ResourceDependency')), pyxb.utils.utility.Location(u'avm.xsd', 345, 6))
st_8 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_8)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_9, False))
symbol = pyxb.binding.content.ElementUse(DesignSpaceContainer_._UseForTag(pyxb.namespace.ExpandedName(None, u'DomainModel')), pyxb.utils.utility.Location(u'avm.xsd', 346, 6))
st_9 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_9)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_10, False))
symbol = pyxb.binding.content.ElementUse(DesignSpaceContainer_._UseForTag(pyxb.namespace.ExpandedName(None, u'Resource')), pyxb.utils.utility.Location(u'avm.xsd', 347, 6))
st_10 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_10)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_11, False))
symbol = pyxb.binding.content.ElementUse(DesignSpaceContainer_._UseForTag(pyxb.namespace.ExpandedName(None, u'Classifications')), pyxb.utils.utility.Location(u'avm.xsd', 348, 6))
st_11 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_11)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_9, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_11, [
fac.UpdateInstruction(cc_0, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_9, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_11, [
fac.UpdateInstruction(cc_1, False) ]))
st_1._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_9, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_11, [
fac.UpdateInstruction(cc_2, False) ]))
st_2._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_3, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_9, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_11, [
fac.UpdateInstruction(cc_3, False) ]))
st_3._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_4, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_9, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_11, [
fac.UpdateInstruction(cc_4, False) ]))
st_4._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_5, True) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_9, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_11, [
fac.UpdateInstruction(cc_5, False) ]))
st_5._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_6, True) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_9, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_11, [
fac.UpdateInstruction(cc_6, False) ]))
st_6._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_7, True) ]))
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_9, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_11, [
fac.UpdateInstruction(cc_7, False) ]))
st_7._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_8, True) ]))
transitions.append(fac.Transition(st_9, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_11, [
fac.UpdateInstruction(cc_8, False) ]))
st_8._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_9, [
fac.UpdateInstruction(cc_9, True) ]))
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_11, [
fac.UpdateInstruction(cc_9, False) ]))
st_9._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_10, True) ]))
transitions.append(fac.Transition(st_11, [
fac.UpdateInstruction(cc_10, False) ]))
st_10._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_11, [
fac.UpdateInstruction(cc_11, True) ]))
st_11._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
DesignSpaceContainer_._Automaton = _BuildAutomaton_17()
def _BuildAutomaton_18 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_18
del _BuildAutomaton_18
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0L, max=1, metadata=pyxb.utils.utility.Location(u'avm.xsd', 538, 6))
counters.add(cc_0)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(Parameter_._UseForTag(pyxb.namespace.ExpandedName(None, u'Value')), pyxb.utils.utility.Location(u'avm.xsd', 538, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
Parameter_._Automaton = _BuildAutomaton_18()
def _BuildAutomaton_19 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_19
del _BuildAutomaton_19
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0L, max=1, metadata=pyxb.utils.utility.Location(u'avm.xsd', 538, 6))
counters.add(cc_0)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(Metric_._UseForTag(pyxb.namespace.ExpandedName(None, u'Value')), pyxb.utils.utility.Location(u'avm.xsd', 538, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
Metric_._Automaton = _BuildAutomaton_19()
Connector_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'Role'), Port_, scope=Connector_, location=pyxb.utils.utility.Location(u'avm.xsd', 161, 10)))
Connector_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'Property'), Property_, scope=Connector_, location=pyxb.utils.utility.Location(u'avm.xsd', 162, 10)))
Connector_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'DefaultJoin'), _ImportedBinding__iFAB.assemblyDetail, scope=Connector_, location=pyxb.utils.utility.Location(u'avm.xsd', 163, 10)))
Connector_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'Connector'), Connector_, scope=Connector_, location=pyxb.utils.utility.Location(u'avm.xsd', 164, 10)))
Connector_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'ConnectorFeature'), ConnectorFeature_, scope=Connector_, location=pyxb.utils.utility.Location(u'avm.xsd', 165, 10)))
def _BuildAutomaton_20 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_20
del _BuildAutomaton_20
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'avm.xsd', 161, 10))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'avm.xsd', 162, 10))
counters.add(cc_1)
cc_2 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'avm.xsd', 163, 10))
counters.add(cc_2)
cc_3 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'avm.xsd', 164, 10))
counters.add(cc_3)
cc_4 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'avm.xsd', 165, 10))
counters.add(cc_4)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(Connector_._UseForTag(pyxb.namespace.ExpandedName(None, u'Role')), pyxb.utils.utility.Location(u'avm.xsd', 161, 10))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(Connector_._UseForTag(pyxb.namespace.ExpandedName(None, u'Property')), pyxb.utils.utility.Location(u'avm.xsd', 162, 10))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_2, False))
symbol = pyxb.binding.content.ElementUse(Connector_._UseForTag(pyxb.namespace.ExpandedName(None, u'DefaultJoin')), pyxb.utils.utility.Location(u'avm.xsd', 163, 10))
st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_3, False))
symbol = pyxb.binding.content.ElementUse(Connector_._UseForTag(pyxb.namespace.ExpandedName(None, u'Connector')), pyxb.utils.utility.Location(u'avm.xsd', 164, 10))
st_3 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_3)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_4, False))
symbol = pyxb.binding.content.ElementUse(Connector_._UseForTag(pyxb.namespace.ExpandedName(None, u'ConnectorFeature')), pyxb.utils.utility.Location(u'avm.xsd', 165, 10))
st_4 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_4)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_1, False) ]))
st_1._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_2, False) ]))
st_2._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_3, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_3, False) ]))
st_3._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_4, True) ]))
st_4._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
Connector_._Automaton = _BuildAutomaton_20()
NormalDistribution_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'Mean'), ValueExpressionType_,
value.
:vartype value: int
:ivar storage_limit: Storage limit.
:vartype storage_limit: ~azure.mgmt.sql.models.MaxSizeCapability
:ivar status: The status of the capability. Possible values include: "Visible", "Available",
"Default", "Disabled".
:vartype status: str or ~azure.mgmt.sql.models.CapabilityStatus
:param reason: The reason for the capability not being available.
:type reason: str
"""
_validation = {
'name': {'readonly': True},
'value': {'readonly': True},
'storage_limit': {'readonly': True},
'status': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'int'},
'storage_limit': {'key': 'storageLimit', 'type': 'MaxSizeCapability'},
'status': {'key': 'status', 'type': 'str'},
'reason': {'key': 'reason', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(InstancePoolVcoresCapability, self).__init__(**kwargs)
self.name = None
self.value = None
self.storage_limit = None
self.status = None
self.reason = kwargs.get('reason', None)
class Job(ProxyResource):
"""A job.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param description: User-defined description of the job.
:type description: str
:ivar version: The job version number.
:vartype version: int
:param schedule: Schedule properties of the job.
:type schedule: ~azure.mgmt.sql.models.JobSchedule
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'version': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'version': {'key': 'properties.version', 'type': 'int'},
'schedule': {'key': 'properties.schedule', 'type': 'JobSchedule'},
}
def __init__(
self,
**kwargs
):
super(Job, self).__init__(**kwargs)
self.description = kwargs.get('description', "")
self.version = None
self.schedule = kwargs.get('schedule', None)
class JobAgent(TrackedResource):
"""An Azure SQL job agent.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param sku: The name and tier of the SKU.
:type sku: ~azure.mgmt.sql.models.Sku
:param database_id: Resource ID of the database to store job metadata in.
:type database_id: str
:ivar state: The state of the job agent. Possible values include: "Creating", "Ready",
"Updating", "Deleting", "Disabled".
:vartype state: str or ~azure.mgmt.sql.models.JobAgentState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'Sku'},
'database_id': {'key': 'properties.databaseId', 'type': 'str'},
'state': {'key': 'properties.state', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobAgent, self).__init__(**kwargs)
self.sku = kwargs.get('sku', None)
self.database_id = kwargs.get('database_id', None)
self.state = None
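# (Added illustration) A hedged sketch of constructing a JobAgent model locally;
# the field names follow the _attribute_map above, and ``Sku`` is assumed to be
# the Sku model defined elsewhere in this module. Real deployments go through
# the service client's job_agents operations rather than instantiating this
# model directly.
#
#   agent = JobAgent(
#       location="eastus",
#       tags={"env": "dev"},
#       sku=Sku(name="Agent", capacity=100),
#       database_id="/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Sql/servers/<server>/databases/<db>",
#   )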
class JobAgentListResult(msrest.serialization.Model):
"""A list of Azure SQL job agents.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: Array of results.
:vartype value: list[~azure.mgmt.sql.models.JobAgent]
:ivar next_link: Link to retrieve next page of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[JobAgent]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobAgentListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class JobAgentUpdate(msrest.serialization.Model):
"""An update to an Azure SQL job agent.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(JobAgentUpdate, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
class JobCredential(ProxyResource):
"""A stored credential that can be used by a job to connect to target databases.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param username: The credential user name.
:type username: str
:param password: The credential password.
:type password: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'username': {'key': 'properties.username', 'type': 'str'},
'password': {'key': 'properties.password', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobCredential, self).__init__(**kwargs)
self.username = kwargs.get('username', None)
self.password = kwargs.get('password', None)
class JobCredentialListResult(msrest.serialization.Model):
"""A list of job credentials.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: Array of results.
:vartype value: list[~azure.mgmt.sql.models.JobCredential]
:ivar next_link: Link to retrieve next page of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[JobCredential]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobCredentialListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class JobExecution(ProxyResource):
"""An execution of a job.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar job_version: The job version number.
:vartype job_version: int
:ivar step_name: The job step name.
:vartype step_name: str
:ivar step_id: The job step id.
:vartype step_id: int
:ivar job_execution_id: The unique identifier of the job execution.
:vartype job_execution_id: str
:ivar lifecycle: The detailed state of the job execution. Possible values include: "Created",
"InProgress", "WaitingForChildJobExecutions", "WaitingForRetry", "Succeeded",
"SucceededWithSkipped", "Failed", "TimedOut", "Canceled", "Skipped".
:vartype lifecycle: str or ~azure.mgmt.sql.models.JobExecutionLifecycle
:ivar provisioning_state: The ARM provisioning state of the job execution. Possible values
include: "Created", "InProgress", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or ~azure.mgmt.sql.models.ProvisioningState
:ivar create_time: The time that the job execution was created.
:vartype create_time: ~datetime.datetime
:ivar start_time: The time that the job execution started.
:vartype start_time: ~datetime.datetime
:ivar end_time: The time that the job execution completed.
:vartype end_time: ~datetime.datetime
:param current_attempts: Number of times the job execution has been attempted.
:type current_attempts: int
:ivar current_attempt_start_time: Start time of the current attempt.
:vartype current_attempt_start_time: ~datetime.datetime
:ivar last_message: The last status or error message.
:vartype last_message: str
:ivar target: The target that this execution is executed on.
:vartype target: ~azure.mgmt.sql.models.JobExecutionTarget
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'job_version': {'readonly': True},
'step_name': {'readonly': True},
'step_id': {'readonly': True},
'job_execution_id': {'readonly': True},
'lifecycle': {'readonly': True},
'provisioning_state': {'readonly': True},
'create_time': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'current_attempt_start_time': {'readonly': True},
'last_message': {'readonly': True},
'target': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'job_version': {'key': 'properties.jobVersion', 'type': 'int'},
'step_name': {'key': 'properties.stepName', 'type': 'str'},
'step_id': {'key': 'properties.stepId', 'type': 'int'},
'job_execution_id': {'key': 'properties.jobExecutionId', 'type': 'str'},
'lifecycle': {'key': 'properties.lifecycle', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'create_time': {'key': 'properties.createTime', 'type': 'iso-8601'},
'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
'end_time': {'key': 'properties.endTime', 'type': 'iso-8601'},
'current_attempts': {'key': 'properties.currentAttempts', 'type': 'int'},
'current_attempt_start_time': {'key': 'properties.currentAttemptStartTime', 'type': 'iso-8601'},
'last_message': {'key': 'properties.lastMessage', 'type': 'str'},
'target': {'key': 'properties.target', 'type': 'JobExecutionTarget'},
}
def __init__(
self,
**kwargs
):
super(JobExecution, self).__init__(**kwargs)
self.job_version = None
self.step_name = None
self.step_id = None
self.job_execution_id = None
self.lifecycle = None
self.provisioning_state = None
self.create_time = None
self.start_time = None
self.end_time = None
self.current_attempts = kwargs.get('current_attempts', None)
self.current_attempt_start_time = None
self.last_message = None
self.target = None
class JobExecutionListResult(msrest.serialization.Model):
"""A list of job executions.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: Array of results.
:vartype value: list[~azure.mgmt.sql.models.JobExecution]
:ivar next_link: Link to retrieve next page of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[JobExecution]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobExecutionListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class JobExecutionTarget(msrest.serialization.Model):
"""The target that a job execution is executed on.
    Variables are only populated by the server, and will be ignored
#!/usr/bin/env python3
# author: d.koch
# coding: utf-8
# naming: pep-0008
# typing: pep-0484
# docstring: pep-0257
# indentation: tabulation (4 spc)
""" enamlx-api.py
API list in copy/paste friendly format
"""
# How to use this file
# This file is *not* runnable
# This file is *not* an exhaustive documentation
# This file briefly describes the topology of the module
# This file briefly describes the different classes
# This file briefly describes the relationship between classes
# Check the 'Dependencies and relationship' part below
# Copy and paste the 'from ... import ...' and 'xxx:' parts into the enaml file
# Ensure relationship is preserved, indent as needed, adapt members and methods
# Check often using 'enaml-run file.enaml'
# Hierarchy and inheritance
# (atom) Atom
# widgets +-- Brush
# | color = ColorMember()
# | image = Instance(Image)
# | style = Enum('solid', 'dense1', 'dense2', 'dense3', 'dense4', 'dense5', 'dense6', 'dense7', 'horizontal', 'vertical', 'cross', 'diag', 'bdiag', 'fdiag', 'linear', 'radial', 'conical', 'texture', 'none')
# (enaml) +-- Object
# (enaml) | +-- Declarative
# (enaml) | +-- ToolkitObject
# widgets | +-- GraphicsItem
# | | | <GraphicsView
# | | | proxy = Typed(ProxyGraphicsItem)
# | | | position = PointMember()
# | | | rotation = Float(strict=False)
# | | | scale = Float(1.0, strict=False)
# | | | opacity = Float(1.0, strict=False)
# | | | selected = Bool()
# | | | enabled = Bool(True)
# | | | visible = Bool(True)
# | | | tool_tip = Str()
# | | | status_tip = Str()
# | | | features = Coerced(Feature.Flags)
# | | | extra_features = Coerced(GraphicFeature.Flags)
# | | | request_update = Event()
# | | | selectable = Bool()
# | | | movable = Bool()
# | | | #
# | | | self.show()
# | | | self.hide()
# | | | #
# | | | focus_gained => ():
# | | | focus_lost => ():
# | | | drag_start => ():
# | | | drag_end => (drag_data, result):
# | | | drag_enter => (event):
# | | | drag_move => (event):
# | | | drag_leave => ():
# | | | drop => (event):
# | | | mouse_press_event => (event):
# | | | mouse_move_event => (event):
# | | | mouse_release_event => (event):
# | | | wheel_event => (event):
# | | | draw => (painter, options, widget):
# | | +-- AbstractGraphicsShapeItem
# | | | | proxy = Typed(ProxyAbstractGraphicsShapeItem)
# | | | | pen = Instance(Pen)
# | | | | brush = Instance(Brush)
# widgets | | | +-- GraphicsEllipseItem
# | | | | proxy = Typed(ProxyGraphicsEllipseItem)
# | | | | width = Float(10.0, strict=False)
# | | | | height = Float(10.0, strict=False)
# | | | | span_angle = Float(360.0, strict=False)
# | | | | start_angle = Float(0.0, strict=False)
# widgets | | | +-- GraphicsLineItem
# | | | | proxy = Typed(ProxyGraphicsLineItem)
# | | | | point = List(PointMember())
# widgets | | | +-- GraphicsPathItem
# | | | | proxy = Typed(ProxyGraphicsPathItem)
# | | | | path = Value()
# widgets | | | +-- GraphicsPolygonItem
# | | | | proxy = Typed(ProxyGraphicsPolygonItem)
# | | | | points = List(PointMember())
# widgets | | | +-- GraphicsRectItem
# | | | | proxy = Typed(ProxyGraphicsRectItem)
# | | | | width = Float(10.0, strict=False)
# | | | | height = Float(10.0, strict=False)
# widgets | | | +-- GraphicsTextItem
# | | | proxy = Typed(ProxyGraphicsTextItem)
# | | | text = Str()
# | | | font = FontMember()
# widgets | | +-- GraphicsImageItem
# | | | proxy = Typed(ProxyGraphicsImageItem)
# | | | image = Instance(Image)
# widgets | | +-- GraphicsItemGroup
# | | | proxy = Typed(ProxyGraphicsItemGroup)
# widgets | | +-- GraphicsWidget
# | | proxy = Typed(ProxyGraphicsWidget)
# (enaml) | +-- Widget
# (enaml) | +-- ConstraintsWidget
# (enaml) | +-- Control
# | | +-- AbstractItemView
# | | | | hug_width = set_default('ignore')
# | | | | hug_height = set_default('ignore')
# | | | | items = ContainerList(default=[])
# | | | | selection_mode = Enum('extended', 'none', 'multi', 'single', 'contiguous')
# | | | | selection_behavior = Enum('items', 'rows', 'columns')
# | | | | selection = ContainerList(default=[])
# | | | | scroll_to_bottom = Bool(False)
# | | | | alternating_row_colors = Bool(False)
# | | | | cell_padding = Int(0)
# | | | | auto_resize = Bool(True)
# | | | | resize_mode = Enum('interactive', 'fixed', 'stretch', 'resize_to_contents', 'custom')
# | | | | word_wrap = Bool(False)
# | | | | show_vertical_header = Bool(True)
# | | | | vertical_headers = ContainerList()
# | | | | vertical_stretch = Bool(False)
# | | | | vertical_minimum_section_size = Int(0)
# | | | | show_horizontal_header = Bool(True)
# | | | | horizontal_headers << ContainerList()
# | | | | horizontal_stretch = Bool(False)
# | | | | horizontal_minimum_section_size = Int(0)
# | | | | sortable = Bool(True)
# | | | | current_row = Int(0)
# | | | | current_column = Int(0)
# | | | | visible_row = Int(0)
# | | | | visible_rows = Int(100)
# | | | | visible_column = Int(0)
# | | | | visible_columns = Int(1)
# widgets | | | +-- TableView
# | | | | proxy = Typed(ProxyTableView)
# | | | | show_grid = Bool(True)
# widgets | | | +-- TreeView
# | | | proxy = Typed(ProxyTreeView)
# | | | show_root = Bool(True)
# | | +-- AbstractWidgetItemGroup
# | | | | >AbstractWidgetItem
# | | | | clicked = d_(Event(), writable=False)
# | | | | double_clicked = d_(Event(), writable=False)
# | | | | entered = d_(Event(), writable=False)
# | | | | pressed = d_(Event(), writable=False)
# | | | | selection_changed = d_(Event(bool), writable=False)
# | | | +-- AbstractWidgetItem
# | | | | | row = d_(Int(), writable=False)
# | | | | | column = d_(Int(), writable=False)
# | | | | | text = d_(Str())
# | | | | | text_alignment = d_(Enum(*[(h, v) for h in ('left', 'right', 'center', 'justify') for v in ('center', 'top', 'bottom')]))
# | | | | | icon = d_(Typed(Icon))
# | | | | | icon_size = d_(Coerced(Size, (-1, -1)))
# | | | | | selectable = d_(Bool(True))
# | | | | | selected = d_(Bool())
# | | | | | checkable = d_(Bool())
# | | | | | checked = d_(Bool())
# | | | | | editable = d_(Bool())
# | | | | | changed = d_(Event(), writable=False)
# | | | | | toggled = d_(Event(bool), writable=False)
# widgets | | | | +-- TableViewItem
# | | | | | proxy = Typed(ProxyTableViewItem)
# widgets | | | | +-- TreeViewItem
# | | | | | <TreeViewItem
# | | | | | >TreeViewItem
# | | | | | proxy = Typed(ProxyTreeViewItem)
# | | | | | items = ContainerList(default=[])
# | | | | | visible_row = Int(0)
# | | | | | visible_rows = Int(100)
# | | | | | visible_column = Int(0)
# | | | | | visible_columns = Int(1)
# widgets | | | | +-- TreeViewColumn
# | | | | proxy = Typed(ProxyTreeViewColumn)
# widgets | | | +-- TableViewRow
# | | | | proxy = Typed(ProxyTableViewRow)
# | | | | row = Int()
# widgets | | | +-- TableViewColumn
# | | | proxy = Typed(ProxyTableViewColumn)
# | | | column = Int()
# widgets | | +-- GraphicsView
# | | | >GraphicsItem
# | | | proxy = Typed(ProxyGraphicsView)
# | | | hug_width = set_default('ignore')
# | | | hug_height = set_default('ignore')
# | | | renderer = Enum('default', 'opengl', 'qwidget')
# | | | antialiasing = Bool(True)
# | | | selected_items = List(GraphicsItem)
# | | | drag_mode = Enum('none', 'scroll', 'selection')
# | | | min_zoom = Float(0.007, strict=False)
# | | | max_zoom = Float(100.0, strict=False)
# | | | auto_range = Bool(False)
# | | | lock_aspect_ratio = Bool(True)
# | | | extra_features = Coerced(GraphicFeature.Flags)
# | | | #
# | | | self.get_item_at(*args, **kwargs)
# | | | self.fit_in_view(item)
# | | | self.center_on(item)
# | | | self.translate_view(x=0, y=0)
# | | | self.scale_view(x=1, y=1)
# | | | self.rotate_view(angle=0)
# | | | self.reset_view()
# | | | self.map_from_scene(point)
# | | | self.map_to_scene(point)
# | | | self.pixel_density(self)
# | | | #
# | | | wheel_event => (event):
# | | | mouse_press_event => (event):
# | | | mouse_move_event => (event):
# | | | mouse_release_event => (event):
# | | | draw_background => (painter, rect):
# widgets | | +-- KeyEvent
# | | | proxy = Typed(ProxyKeyEvent))
# | | | keys = List(str)
# | | | enabled = Bool(True)
# | | | repeats = Bool(True)
# widgets | | +-- OccViewer
# | | | proxy = Typed(ProxyOccViewer)
# | | | position = Tuple(Int(strict=False),default=(0,0))
# | | | display_mode = Enum('shaded','hlr','wireframe')
# | | | selection_mode = Enum('shape','neutral','face','edge','vertex')
# | | | selection = List()
# | | | view_mode = Enum('iso','top','bottom','left','right','front','rear')
# | | | trihedron_mode = Enum('right-lower','disabled')
# | | | background_gradient = Tuple(Int(),default=(206, 215, 222, 128, 128, 128))
# | | | double_buffer = Bool(True)
# | | | shadows = Bool(False)
# | | | reflections = Bool(True)
# | | | antialiasing = Bool(True)
# | | | hug_width = set_default('ignore')
# | | | hug_height = set_default('ignore')
# | | | #
# | | | on_key_press :: Event()
# | | | on_mouse_press :: Event()
# | | | on_mouse_release :: Event()
# | | | on_mouse_wheel :: Event()
# | | | on_mouse_move :: Event()
# | | +-- PlotItem
# | | | | title = Str()
# | | | | name = Str()
# | | | | row = Int(0)
# | | | | column = Int(0)
# | | | | line_pen = Instance(PEN_ARGTYPES)
# | | | | shadow_pen = Instance(PEN_ARGTYPES)
# | | | | fill_level = Float(strict=False)
# | | | | fill_brush = Instance(BRUSH_ARGTYPES)
# | | | | symbol = Enum(None, 'o', 's', 't', 'd', '+')
# | | | | symbol_size = Float(10, strict=False)
# | | | | symbol_pen = Instance(PEN_ARGTYPES)
# | | | | symbol_brush = Instance(BRUSH_ARGTYPES)
# | | | | show_legend = ContainerList()
# | | | | label_left = Str()
# | | | | label_right = Str()
# | | | | label_top = Str()
# | | | | label_bottom = Str()
# | | | | grid = Tuple(bool, default=(False, False))
# | | | | grid_alpha = FloatRange(low=0.0, high=1.0, value=0.5)
# | | | | multi_axis = Bool(True)
# | | | | axis_left_ticks = Callable()
# | | | | axis_bottom_ticks = Callable()
# | | | | log_mode = Tuple(bool, default=(False, False))
# | | | | antialias = Bool(False)
# | | | | auto_range = Enum(True, False)
# | | | | range_x = ContainerList(default=[0, 100])
# | | | | range_y = ContainerList(default=[0, 100])
# | | | | auto_downsample = Bool(False)
# | | | | clip_to_view = Bool(False)
# | | | | step_mode = Bool(False)
# | | | | aspect_locked = Bool(False)
# | | | | refresh_time = Int(100)
# widgets | | | +-- PlotItem2D
# | | | | | x = ContainerList()
# | | | | | y = ContainerList()
# widgets | | | | +-- PlotItem3D
# | | | | | | z = ContainerList()
# widgets | | | | | +-- PlotItemArray3D
# | | | | | type = Enum('line')
# | | | | | x = numpy_ndarray
# | | | | | y = numpy_ndarray
# | | | | | z = numpy_ndarray
# widgets | | | | +-- PlotItemArray
# | | | | x = numpy_ndarray
# | | | | y = numpy_ndarray
# | | | +-- AbstractDataPlotItem
# widgets | | | +-- PlotItemDict
# | | | | data = Dict(default={'x': [], 'y': []})
# widgets | | | +-- PlotItemList
# | | | data = ContainerList()
# (enaml) | | +-- SpinBox
# widgets | | +-- DoubleSpinBox
# | | decimals = Int(2)
# | | minimum = Float(0, strict=False)
# | | maximum = Float(100, strict=False)
# | | single_step = Float(1.0, strict=False)
# | | value = Float(0, strict=False)
# (enaml) | +-- Frame
# (enaml) | +-- Container
# widgets | +-- Console
# | | proxy = Typed(ProxyConsole)
# | | font_family = Str()
# | | font_size = Int(0)
# | | console_size = Coerced(Size,(81,25))
# | | buffer_size = Int(0)
# | | display_banner = Bool(False)
# | | completion = Enum('ncurses','plain', 'droplist')
# | | execute = Instance(object)
# widgets | +-- PlotArea
# | hug_width = set_default('ignore')
# | hug_height = set_default('ignore')
# | proxy = Typed(ProxyPlotArea)
# | setup = Callable(lambda graph: None)
# widgets +-- Pen
# | color = ColorMember()
# | width = Float(1.0, strict=False)
# | line_style = Enum('solid', 'dash', 'dot', 'dash_dot', 'dash_dot_dot', 'custom', 'none')
# | cap_style = Enum('square', 'flat', 'round')
# | join_style = Enum('bevel', 'miter', 'round')
# | dash_pattern = List(Float(strict=False))
# widgets +-- Point
# | x = Float(0, strict=False)
# | y = Float(0, strict=False)
# | z = Float(0, strict=False)
# widgets +-- Rect
# x = Float(0, strict=False)
# y = Float(0, strict=False)
# width = Float(0, strict=False)
# height = Float(0, strict=False)
# Dependencies and relationship
# (enaml)
# +-- (Container)
# +-- GraphicsView
# | | >Point
# | | >Rect
# | | background =
# | | drag_mode =
# | | minimum_size =
# | +-- (AbstractGraphicsShapeItem)
# | | >Brush
# | | >Pen
# | +-- (Menu)
# | +-- (Pattern)
# | | | iterable = range(...)
# | | | loop.index
# | | +-- GraphicsEllipseItem
# | | | height =
# | | | opacity =
# | | | pen =
# | | | position =
# | | | width =
# | | +-- GraphicsLineItem
# | | | point =
# | | | position =
# | | +-- GraphicsPathItem
# | | | pen =
# | | | movable =
# | | | path <<
# | | | scale =
# | | +-- GraphicsPolygonItem
# | | | points =
# | | | scale =
# | | +-- GraphicsRectItem
# | | | brush =
# | | | position =
# | | | opacity =
# | | | width =
# | | +-- GraphicsTextItem
# | | activated ::
# | | drag_start => ():
# | | drag_end => (drag_data, result):
# | | features =
# | | pen =
# | | position =
# | | rotation =
# | | selectable =
# | | text <<
# | +-- GraphicsImageItem
# | +-- GraphicsItemGroup
# | | +-- (Pattern)
# | | | iterable = range(...)
# | | | loop.index
# | | +-- (Graphics...Item)
# | +-- GraphicsWidget
# | | position =
# | | rotation =
# | +-- (Widget)
# +-- PlotArea
# | +-- PlotItemArray
# | | auto_range =
# | | background =
# | | label_left =
# | | label_right =
# | | line_pen =
# | | multi_axis =
# | | y <<
# | +-- (PlotItem...)
# | +-- (PlotItem...)
# +-- TableView: table:
# | | horizontal_headers <<
# | | horizontal_stretch = True
# | | items <<
# | | minimum_size =
# | +-- (Pattern)
# | | iterable <<
# | | loop.index
# | | loop.item
# | +-- TableViewRow
# | | row << table.items[self.row]
# | | clicked :=
# | +-- (Menu)
# | +-- TableViewItem
# | | checkable =
# | | checked :=
# | | clicked :=
# | | double_clicked :=
# | | selected :=
# | | icon <<
# | | text <<
# | +-- (Control)
# | +-- (Menu)
# | +-- (Action)
# | text <<
# | triggered ::
# +-- TreeView: tree:
# | horizontal_headers <<
# | items <<
# +-- (Pattern)
# | iterable << tree.items
# | loop.index
# | loop.item
# +-- TreeViewItem
# | icon <<
# | items <<
# | text <<
# +-- (TreeViewItem)
# +-- (TreeViewColumn)
# | checkable =
# | checked :=
# | icon <<
# | text <<
# +-- (Control)
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
# EnamlX : AbstractItemView
from enamlx.widgets.abstract_item_view import AbstractItemView
AbstractItemView:
hug_width = set_default('ignore')
hug_height = set_default('ignore')
items = ContainerList(default=[])
selection_mode = Enum('extended', 'none', 'multi', 'single', 'contiguous')
selection_behavior = Enum('items', 'rows', 'columns')
selection = ContainerList(default=[])
scroll_to_bottom = Bool(False)
alternating_row_colors = Bool(False)
cell_padding = Int(0)
auto_resize = Bool(True)
resize_mode = Enum('interactive', 'fixed', 'stretch', 'resize_to_contents', 'custom')
word_wrap = Bool(False)
show_vertical_header = Bool(True)
vertical_headers = ContainerList()
vertical_stretch = Bool(False)
vertical_minimum_section_size = Int(0)
show_horizontal_header = Bool(True)
horizontal_headers = ContainerList()
horizontal_stretch = Bool(False)
horizontal_minimum_section_size = Int(0)
sortable = Bool(True)
current_row = Int(0)
current_column = Int(0)
visible_row = Int(0)
visible_rows = Int(100)
visible_column = Int(0)
visible_columns = Int(1)
#Control:
proxy = Typed(ProxyControl)
#ConstraintsWidget:
hug_width = PolicyEnum('strong')
hug_height = PolicyEnum('strong')
resist_width = PolicyEnum('strong')
resist_height = PolicyEnum('strong')
limit_width = PolicyEnum('ignore')
limit_height = PolicyEnum('ignore')
proxy = Typed(ProxyConstraintsWidget)
#
self.request_relayout()
self.when(switch)
self.layout_constraints()
#Widget:
enabled = Bool(True)
visible = Bool(True)
background = ColorMember()
foreground = ColorMember()
font = FontMember()
minimum_size = Coerced(Size, (-1, -1))
maximum_size = Coerced(Size, (-1, -1))
tool_tip = Str()
status_tip = Str()
features = Coerced(Feature.Flags)
proxy = Typed(ProxyWidget)
#
self.restyle()
self.show(s)
self.hide()
#
next_focus_child => (current):
previous_focus_child => (current):
focus_gained => ():
focus_lost => ():
drag_start => ():
drag_end => (drag_data, result):
drag_enter => (event):
drag_move => (event):
drag_leave => ():
drop => (event):
#ToolkitObject:
activated :: Event()
proxy = Typed(ProxyToolkitObject)
proxy_is_active = flag_property(ACTIVE_PROXY_FLAG)
#
self.initialize()
self.destroy()
self.child_added(child)
self.child_removed(child)
self.activate_proxy()
self.activate_top_down()
self.activate_bottom_up()
#Declarative:
name = Str()
initialized :: Event()
is_initialized = flag_property()
#
self.initialize()
self.destroy()
self.child_added(child)
#Object:
name = Str()
parent = Object()
children = List(Object())
is_destroyed = flag_property(DESTROYED_FLAG)
destroyed :: Event()
#
parent_changed => (old, new):
child_added => (child):
child_moved => (child):
child_removed => (child):
#
self.destroy()
self.set_parent(parent)
self.insert_children(before, insert)
self.root_object()
self.traverse(depth_first=False)
self.traverse_ancestors(root=None)
self.find(name, regex=False)
self.find_all(name, regex=False)
#Atom:
self.__init__(**kwargs)
self.freeze()
self.get_member(member: str)
self.has_observer(member: str, func: Callable[[Dict[str, Any]], None])
self.has_observers(member: str)
self.notifications_enabled()
self.notify(member_name: str, *args, **kwargs)
self.observe(member: str, func: Callable[[Dict[str, Any]], None])
self.set_notifications_enabled(enabled: bool)
self.unobserve(member: str, func: Callable[[Dict[str, Any]], None])
self.__sizeof__()
#
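# A minimal, hedged illustration (not part of the original listing) of the
# Atom observer API summarized above; the member name and the callback are
# assumptions made only for this example:
#
#     def _on_items_change(change):
#         # 'change' is the notification dict; 'name' and 'value' are
#         # standard keys supplied by atom.
#         print(change['name'], change['value'])
#
#     aiview.observe('items', _on_items_change)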
# << (Control) :
aiview = AbstractItemView()
# <-> (set_default('ignore')) : Table should expand by default
hug_width = aiview.hug_width = hug_width
# <-> (set_default('ignore')) : Table should expand by default
hug_height = aiview.hug_height = hug_height
# <-> (ContainerList(default=[])) : The items to display in the view
items = aiview.items = items
# <-> (Enum('extended', 'none', 'multi', 'single', 'contiguous')) : Selection mode of the view
selection_mode = aiview.selection_mode = selection_mode
# <-> (Enum('items', 'rows', 'columns')) : Selection behavior of the view
selection_behavior = aiview.selection_behavior = selection_behavior
# <-> (ContainerList(default=[])) : Selection
selection = aiview.selection = selection
# <-> (Bool(False)) : Automatically scroll to bottom when new items are added
scroll_to_bottom = aiview.scroll_to_bottom = scroll_to_bottom
# <-> (Bool(False)) : Set alternating row colors
alternating_row_colors = aiview.alternating_row_colors = alternating_row_colors
# <-> (Int(0)) : Cell padding
cell_padding = aiview.cell_padding = cell_padding
# <-> (Bool(True)) : Automatically resize columns to fit contents
auto_resize = aiview.auto_resize = auto_resize
# <-> (Enum('interactive', 'fixed', 'stretch', 'resize_to_contents', 'custom')) : Resize mode of columns and rows
resize_mode = aiview.resize_mode = resize_mode
# <-> (Bool(False)) : Word wrap
word_wrap = aiview.word_wrap = word_wrap
# <-> (Bool(True)) : Show vertical header bar
show_vertical_header = aiview.show_vertical_header = show_vertical_header
# <-> (ContainerList()) : Row headers
vertical_headers = aiview.vertical_headers = vertical_headers
# <-> (Bool(False)) : Stretch last row
vertical_stretch = aiview.vertical_stretch = vertical_stretch
# <-> (Int(0)) : Minimum row size
vertical_minimum_section_size = aiview.vertical_minimum_section_size = vertical_minimum_section_size
# <-> (Bool(True)) : Show horizontal header bar
show_horizontal_header = aiview.show_horizontal_header = show_horizontal_header
# <-> (ContainerList()) : Column headers
horizontal_headers = aiview.horizontal_headers = horizontal_headers
# <-> (Bool(False)) : Stretch last column
horizontal_stretch = aiview.horizontal_stretch = horizontal_stretch
# <-> (Int(0)) : Minimum column size
horizontal_minimum_section_size = aiview.horizontal_minimum_section_size = horizontal_minimum_section_size
# <-> (Bool(True)) : Table is sortable
sortable = aiview.sortable = sortable
# <-> (Int(0)) : Current row index
current_row = aiview.current_row = current_row
# <-> (Int(0)) : Current column index
current_column = aiview.current_column = current_column
# <-> (Int(0)) : First visible row
visible_row = aiview.visible_row = visible_row
# <-> (Int(100)) : Number of rows visible
visible_rows = aiview.visible_rows = visible_rows
# <-> (Int(0)) : First visible column
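# A hedged sketch (not from the original document): configuring a handful of
# the AbstractItemView attributes documented above from plain Python. The
# instance name, sample data, and chosen values are illustrative assumptions.
example_view = AbstractItemView()
example_view.selection_mode = 'single'
example_view.selection_behavior = 'rows'
example_view.horizontal_headers = ['Name', 'Value']
example_view.items = [('a', 1), ('b', 2)]
example_view.auto_resize = True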
# Boeing_737.py
#
# Created: Feb 2017, <NAME> (taken from data originally in B737/mission_B737.py, noise_optimization/Vehicles.py, and the Boeing 737 tutorial script)
# Modified: Jul 2017, <NAME>
# Mar 2020, <NAME>
# Oct 2021, <NAME>
""" setup file for the Boeing 737 vehicle
"""
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import numpy as np
import SUAVE
from SUAVE.Core import Units
from SUAVE.Methods.Propulsion.turbofan_sizing import turbofan_sizing
from SUAVE.Methods.Geometry.Two_Dimensional.Planform import segment_properties
from copy import deepcopy
# ----------------------------------------------------------------------
# Define the Vehicle
# ----------------------------------------------------------------------
def vehicle_setup():
# ------------------------------------------------------------------
# Initialize the Vehicle
# ------------------------------------------------------------------
vehicle = SUAVE.Vehicle()
vehicle.tag = 'Boeing_737800'
# ------------------------------------------------------------------
# Vehicle-level Properties
# ------------------------------------------------------------------
# mass properties
vehicle.mass_properties.max_takeoff = 79015.8 # kg
vehicle.mass_properties.takeoff = 79015.8 # kg
vehicle.mass_properties.operating_empty = 62746.4 # kg
vehicle.mass_properties.takeoff = 79015.8 # kg
vehicle.mass_properties.max_zero_fuel = 62732.0 # kg
vehicle.mass_properties.cargo = 10000. * Units.kilogram
vehicle.mass_properties.center_of_gravity = [[ 15.30987849, 0. , -0.48023939]]
vehicle.mass_properties.moments_of_inertia.tensor = [[3173074.17, 0 , 28752.77565],[0 , 3019041.443, 0],[0, 0, 5730017.433]] # estimated, not correct
vehicle.design_mach_number = 0.78
vehicle.design_range = 3582 * Units.miles
vehicle.design_cruise_alt = 35000.0 * Units.ft
# envelope properties
vehicle.envelope.ultimate_load = 3.75
vehicle.envelope.limit_load = 1.5
# basic parameters
vehicle.reference_area = 124.862
vehicle.passengers = 170
vehicle.systems.control = "fully powered"
vehicle.systems.accessories = "medium range"
# ------------------------------------------------------------------
# Main Wing
# ------------------------------------------------------------------
wing = SUAVE.Components.Wings.Main_Wing()
wing.tag = 'main_wing'
wing.aspect_ratio = 10.18
wing.sweeps.quarter_chord = 25 * Units.deg
wing.thickness_to_chord = 0.1
wing.taper = 0.1
wing.spans.projected = 34.32
wing.chords.root = 7.760 * Units.meter
wing.chords.tip = 0.782 * Units.meter
wing.chords.mean_aerodynamic = 4.235 * Units.meter
wing.areas.reference = 124.862
wing.areas.wetted = 225.08
wing.twists.root = 4.0 * Units.degrees
wing.twists.tip = 0.0 * Units.degrees
wing.origin = [[13.61,0,-0.93]]
wing.aerodynamic_center = [0,0,0]
wing.vertical = False
wing.symmetric = True
wing.high_lift = True
wing.dynamic_pressure_ratio = 1.0
# Wing Segments
root_airfoil = SUAVE.Components.Airfoils.Airfoil()
root_airfoil.coordinate_file = '../Vehicles/Airfoils/B737a.txt'
segment = SUAVE.Components.Wings.Segment()
segment.tag = 'Root'
segment.percent_span_location = 0.0
segment.twist = 4. * Units.deg
segment.root_chord_percent = 1.
segment.thickness_to_chord = 0.1
segment.dihedral_outboard = 2.5 * Units.degrees
segment.sweeps.quarter_chord = 28.225 * Units.degrees
segment.thickness_to_chord = .1
segment.append_airfoil(root_airfoil)
wing.append_segment(segment)
yehudi_airfoil = SUAVE.Components.Airfoils.Airfoil()
yehudi_airfoil.coordinate_file = '../Vehicles/Airfoils/B737b.txt'
segment = SUAVE.Components.Wings.Segment()
segment.tag = 'Yehudi'
segment.percent_span_location = 0.324
segment.twist = 0.047193 * Units.deg
segment.root_chord_percent = 0.5
segment.thickness_to_chord = 0.1
segment.dihedral_outboard = 5.5 * Units.degrees
segment.sweeps.quarter_chord = 25. * Units.degrees
segment.thickness_to_chord = .1
segment.append_airfoil(yehudi_airfoil)
wing.append_segment(segment)
mid_airfoil = SUAVE.Components.Airfoils.Airfoil()
mid_airfoil.coordinate_file = '../Vehicles/Airfoils/B737c.txt'
segment = SUAVE.Components.Wings.Segment()
segment.tag = 'Section_2'
segment.percent_span_location = 0.963
segment.twist = 0.00258 * Units.deg
segment.root_chord_percent = 0.220
segment.thickness_to_chord = 0.1
segment.dihedral_outboard = 5.5 * Units.degrees
segment.sweeps.quarter_chord = 56.75 * Units.degrees
segment.thickness_to_chord = .1
segment.append_airfoil(mid_airfoil)
wing.append_segment(segment)
tip_airfoil = SUAVE.Components.Airfoils.Airfoil()
tip_airfoil.coordinate_file = '../Vehicles/Airfoils/B737d.txt'
segment = SUAVE.Components.Wings.Segment()
segment.tag = 'Tip'
segment.percent_span_location = 1.
segment.twist = 0. * Units.degrees
segment.root_chord_percent = 0.10077
segment.thickness_to_chord = 0.1
segment.dihedral_outboard = 0.
segment.sweeps.quarter_chord = 0.
segment.thickness_to_chord = .1
segment.append_airfoil(tip_airfoil)
wing.append_segment(segment)
# Fill out more segment properties automatically
wing = segment_properties(wing)
# control surfaces -------------------------------------------
slat = SUAVE.Components.Wings.Control_Surfaces.Slat()
slat.tag = 'slat'
slat.span_fraction_start = 0.2
slat.span_fraction_end = 0.963
slat.deflection = 0.0 * Units.degrees
slat.chord_fraction = 0.075
wing.append_control_surface(slat)
flap = SUAVE.Components.Wings.Control_Surfaces.Flap()
flap.tag = 'flap'
flap.span_fraction_start = 0.2
flap.span_fraction_end = 0.7
flap.deflection = 0.0 * Units.degrees
flap.configuration_type = 'double_slotted'
flap.chord_fraction = 0.30
wing.append_control_surface(flap)
aileron = SUAVE.Components.Wings.Control_Surfaces.Aileron()
aileron.tag = 'aileron'
aileron.span_fraction_start = 0.7
aileron.span_fraction_end = 0.963
aileron.deflection = 0.0 * Units.degrees
aileron.chord_fraction = 0.16
wing.append_control_surface(aileron)
# add to vehicle
vehicle.append_component(wing)
# ------------------------------------------------------------------
# Horizontal Stabilizer
# ------------------------------------------------------------------
wing = SUAVE.Components.Wings.Horizontal_Tail()
wing.tag = 'horizontal_stabilizer'
wing.aspect_ratio = 4.99
wing.sweeps.quarter_chord = 28.2250 * Units.deg
wing.thickness_to_chord = 0.08
wing.taper = 0.3333
wing.spans.projected = 14.4
wing.chords.root = 4.2731
wing.chords.tip = 1.4243
wing.chords.mean_aerodynamic = 8.0
wing.areas.reference = 41.49
wing.areas.exposed = 59.354 # Exposed area of the horizontal tail
wing.areas.wetted = 71.81 # Wetted area of the horizontal tail
wing.twists.root = 3.0 * Units.degrees
wing.twists.tip = 3.0 * Units.degrees
wing.origin = [[33.02,0,1.466]]
wing.aerodynamic_center = [0,0,0]
wing.vertical = False
wing.symmetric = True
wing.dynamic_pressure_ratio = 0.9
# Wing Segments
segment = SUAVE.Components.Wings.Segment()
segment.tag = 'root_segment'
segment.percent_span_location = 0.0
segment.twist = 0. * Units.deg
segment.root_chord_percent = 1.0
segment.dihedral_outboard = 8.63 * Units.degrees
segment.sweeps.quarter_chord = 28.2250 * Units.degrees
segment.thickness_to_chord = .1
wing.append_segment(segment)
segment = SUAVE.Components.Wings.Segment()
segment.tag = 'tip_segment'
segment.percent_span_location = 1.
segment.twist = 0. * Units.deg
segment.root_chord_percent = 0.3333
segment.dihedral_outboard = 0 * Units.degrees
segment.sweeps.quarter_chord = 0 * Units.degrees
segment.thickness_to_chord = .1
wing.append_segment(segment)
# Fill out more segment properties automatically
wing = segment_properties(wing)
# control surfaces -------------------------------------------
elevator = SUAVE.Components.Wings.Control_Surfaces.Elevator()
elevator.tag = 'elevator'
elevator.span_fraction_start = 0.09
elevator.span_fraction_end = 0.92
elevator.deflection = 0.0 * Units.deg
elevator.chord_fraction = 0.3
wing.append_control_surface(elevator)
# add to vehicle
vehicle.append_component(wing)
# ------------------------------------------------------------------
# Vertical Stabilizer
# ------------------------------------------------------------------
wing = SUAVE.Components.Wings.Vertical_Tail()
wing.tag = 'vertical_stabilizer'
wing.aspect_ratio = 1.98865
wing.sweeps.quarter_chord = 31.2 * Units.deg
wing.thickness_to_chord = 0.08
wing.taper = 0.1183
wing.spans.projected = 8.33
wing.total_length = wing.spans.projected
wing.chords.root = 10.1
wing.chords.tip = 1.20
wing.chords.mean_aerodynamic = 4.0
wing.areas.reference = 34.89
wing.areas.wetted = 57.25
wing.twists.root = 0.0 * Units.degrees
wing.twists.tip = 0.0 * Units.degrees
wing.origin = [[26.944,0,1.54]]
wing.aerodynamic_center = [0,0,0]
wing.vertical = True
wing.symmetric = False
wing.t_tail = False
wing.dynamic_pressure_ratio = 1.0
# Wing Segments
segment = SUAVE.Components.Wings.Segment()
segment.tag = 'root'
segment.percent_span_location = 0.0
segment.twist = 0. * Units.deg
segment.root_chord_percent = 1.
segment.dihedral_outboard = 0 * Units.degrees
segment.sweeps.quarter_chord = 61.485 * Units.degrees
segment.thickness_to_chord = .1
wing.append_segment(segment)
segment = SUAVE.Components.Wings.Segment()
segment.tag = 'segment_1'
segment.percent_span_location = 0.2962
segment.twist = 0. * Units.deg
segment.root_chord_percent = 0.45
segment.dihedral_outboard = 0. * Units.degrees
segment.sweeps.quarter_chord = 31.2 * Units.degrees
segment.thickness_to_chord = .1
wing.append_segment(segment)
segment = SUAVE.Components.Wings.Segment()
segment.tag = 'segment_2'
segment.percent_span_location = 1.0
segment.twist = 0. * Units.deg
segment.root_chord_percent = 0.1183
segment.dihedral_outboard = 0.0 * Units.degrees
segment.sweeps.quarter_chord = 0.0
segment.thickness_to_chord = .1
wing.append_segment(segment)
# Fill out more segment properties automatically
wing = segment_properties(wing)
# add to vehicle
vehicle.append_component(wing)
# ------------------------------------------------------------------
# Fuselage
# ------------------------------------------------------------------
fuselage = SUAVE.Components.Fuselages.Fuselage()
fuselage.tag = 'fuselage'
fuselage.number_coach_seats = vehicle.passengers
fuselage.seats_abreast = 6
fuselage.seat_pitch = 31. * Units.inches
fuselage.fineness.nose = 1.6
fuselage.fineness.tail = 2.
fuselage.lengths.nose = 6.4
fuselage.lengths.tail = 8.0
fuselage.lengths.cabin = 28.85
fuselage.lengths.total = 38.02
fuselage.lengths.fore_space = 6.
fuselage.lengths.aft_space = 5.
fuselage.width = 3.74
fuselage.heights.maximum = 3.74
fuselage.heights.at_quarter_length = 3.74
fuselage.heights.at_three_quarters_length = 3.65
fuselage.heights.at_wing_root_quarter_chord = 3.74
fuselage.areas.side_projected = 142.1948
fuselage.areas.wetted = 385.51
fuselage.areas.front_projected = 12.57
fuselage.effective_diameter = 3.74
fuselage.differential_pressure = 5.0e4 * Units.pascal # Maximum differential pressure
# Segment
segment = SUAVE.Components.Lofted_Body_Segment.Segment()
segment.tag = 'segment_0'
segment.percent_x_location = 0.0000
segment.percent_z_location = -0.00144
segment.height = 0.0100
segment.width = 0.0100
fuselage.Segments.append(segment)
# Segment
segment = SUAVE.Components.Lofted_Body_Segment.Segment()
segment.tag = 'segment_1'
segment.percent_x_location = 0.00576
segment.percent_z_location = -0.00144
segment.height = 0.7500
segment.width = 0.6500
fuselage.Segments.append(segment)
# Segment
segment = SUAVE.Components.Lofted_Body_Segment.Segment()
segment.tag = 'segment_2'
segment.percent_x_location = 0.02017
segment.percent_z_location = 0.00000
segment.height = 1.52783
segment.width = 1.20043
fuselage.Segments.append(segment)
# Segment
segment = SUAVE.Components.Lofted_Body_Segment.Segment()
segment.tag = 'segment_3'
segment.percent_x_location = 0.03170
segment.percent_z_location = 0.00000
segment.height = 1.96435
segment.width = 1.52783
fuselage.Segments.append(segment)
# Segment
segment = SUAVE.Components.Lofted_Body_Segment.Segment()
segment.tag = 'segment_4'
segment.percent_x_location = 0.04899
segment.percent_z_location = 0.00431
segment.height = 2.72826
segment.width = 1.96435
fuselage.Segments.append(segment)
# Segment
segment = SUAVE.Components.Lofted_Body_Segment.Segment()
segment.tag = 'segment_5'
segment.percent_x_location = 0.07781
segment.percent_z_location = 0.00861
segment.height = 3.49217
segment.width = 2.61913
fuselage.Segments.append(segment)
# Segment
segment = SUAVE.Components.Lofted_Body_Segment.Segment()
segment.tag = 'segment_6'
segment.percent_x_location = 0.10375
segment.percent_z_location = 0.01005
segment.height = 3.70130
segment.width = 3.05565
fuselage.Segments.append(segment)
# Segment
segment = SUAVE.Components.Lofted_Body_Segment.Segment()
segment.tag = 'segment_7'
segment.percent_x_location = 0.16427
segment.percent_z_location = 0.01148
segment.height = 3.92870
segment.width = 3.71043
fuselage.Segments.append(segment)
# Segment
segment = SUAVE.Components.Lofted_Body_Segment.Segment()
segment.tag = 'segment_8'
segment.percent_x_location = 0.22478
segment.percent_z_location = 0.01148
segment.height = 3.92870
segment.width = 3.92870
fuselage.Segments.append(segment)
# Segment
segment = SUAVE.Components.Lofted_Body_Segment.Segment()
segment.tag = 'segment_9'
segment.percent_x_location = 0.69164
segment.percent_z_location = 0.01292
segment.height =
tp == "\0"):
# ok
pass
else:
raise ValueError("invalid type with ','")
return False
def _calc_padding(self, string, length):
"""compute left and right padding, return total width of string"""
if self._width != -1 and length < self._width:
total = self._width
else:
total = length
align = self._align
if align == ">":
left = total - length
elif align == "^":
left = (total - length) // 2
elif align == "<" or align == "=":
left = 0
else:
raise AssertionError("shouldn't be here")
right = total - length - left
self._left_pad = left
self._right_pad = right
return total
def _lit(self, s):
return s
def _pad(self, string):
builder = self._builder()
builder.append_multiple_char(self._fill_char, self._left_pad)
builder.append(string)
builder.append_multiple_char(self._fill_char, self._right_pad)
return builder.build()
def _builder(self):
return StringBuilder()
def _unknown_presentation(self, tp):
msg = "unknown presentation for %s: '%s'"
w_msg = msg % (tp, self._type)
raise ValueError(w_msg)
def format_string(self, string):
if self._parse_spec("s", "<"):
return string
if self._type != "s":
self._unknown_presentation("string")
if self._sign != "\0":
msg = "Sign not allowed in string format specifier"
raise ValueError(msg)
if self._alternate:
msg = "Alternate form not allowed in string format specifier"
raise ValueError(msg)
if self._align == "=":
msg = "'=' alignment not allowed in string format specifier"
raise ValueError(msg)
length = len(string)
precision = self._precision
if precision != -1 and length >= precision:
assert precision >= 0
length = precision
string = string[:precision]
if self._fill_char == "\0":
self._fill_char = self._lit(" ")[0]
self._calc_padding(string, length)
return self._pad(string)
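# Hedged worked examples (not part of the original source) of the string path
# implemented by format_string() above, matching CPython's format() semantics
# for the same specs:
#   format("hello", ".3")  -> "hel"     (precision truncates)
#   format("hi", "*^6")    -> "**hi**"  (fill '*', centred, width 6)
#   format("hi", "<5")     -> "hi   "   (default fill is a space)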
def _get_locale(self, tp):
if tp == "n":
dec, thousands, grouping = numeric_formatting()
elif self._thousands_sep:
dec = "."
thousands = ","
grouping = "\3\0"
else:
dec = "."
thousands = ""
grouping = "\256"
self._loc_dec = dec
self._loc_thousands = thousands
self._loc_grouping = grouping
def _calc_num_width(self, n_prefix, sign_char, to_number, n_number,
n_remainder, has_dec, digits):
"""Calculate widths of all parts of formatted number.
Output will look like:
<lpadding> <sign> <prefix> <spadding> <grouped_digits> <decimal>
<remainder> <rpadding>
sign is computed from self._sign, and the sign of the number
prefix is given
digits is known
"""
spec = NumberSpec()
spec.n_digits = n_number - n_remainder - has_dec
spec.n_prefix = n_prefix
spec.n_lpadding = 0
spec.n_decimal = int(has_dec)
spec.n_remainder = n_remainder
spec.n_spadding = 0
spec.n_rpadding = 0
spec.n_min_width = 0
spec.n_total = 0
spec.sign = "\0"
spec.n_sign = 0
sign = self._sign
if sign == "+":
spec.n_sign = 1
spec.sign = "-" if sign_char == "-" else "+"
elif sign == " ":
spec.n_sign = 1
spec.sign = "-" if sign_char == "-" else " "
elif sign_char == "-":
spec.n_sign = 1
spec.sign = "-"
extra_length = (spec.n_sign + spec.n_prefix + spec.n_decimal +
spec.n_remainder) # Not padding or digits
if self._fill_char == "0" and self._align == "=":
spec.n_min_width = self._width - extra_length
if self._loc_thousands:
self._group_digits(spec, digits[to_number:])
n_grouped_digits = len(self._grouped_digits)
else:
n_grouped_digits = spec.n_digits
n_padding = self._width - (extra_length + n_grouped_digits)
if n_padding > 0:
align = self._align
if align == "<":
spec.n_rpadding = n_padding
elif align == ">":
spec.n_lpadding = n_padding
elif align == "^":
spec.n_lpadding = n_padding // 2
spec.n_rpadding = n_padding - spec.n_lpadding
elif align == "=":
spec.n_spadding = n_padding
else:
raise AssertionError("shouldn't reach")
spec.n_total = spec.n_lpadding + spec.n_sign + spec.n_prefix + \
spec.n_spadding + n_grouped_digits + \
spec.n_decimal + spec.n_remainder + spec.n_rpadding
return spec
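# Hedged worked example (not in the original source) of the layout computed by
# _calc_num_width() above, using CPython-compatible semantics:
#   format(-1234567, "+15,") -> "     -1,234,567"
# i.e. n_lpadding = 5, sign = "-", grouped digits = "1,234,567",
# n_decimal = n_remainder = n_rpadding = 0, for a total width of 15.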
def _fill_digits(self, buf, digits, d_state, n_chars, n_zeros,
thousands_sep):
if thousands_sep:
for c in thousands_sep:
buf.append(c)
for i in range(d_state - 1, d_state - n_chars - 1, -1):
buf.append(digits[i])
for i in range(n_zeros):
buf.append("0")
def _group_digits(self, spec, digits):
buf = []
grouping = self._loc_grouping
min_width = spec.n_min_width
grouping_state = 0
count = 0
left = spec.n_digits
n_ts = len(self._loc_thousands)
need_separator = False
done = False
groupings = len(grouping)
previous = 0
while True:
group = ord(grouping[grouping_state])
if group > 0:
if group == 256:
break
grouping_state += 1
previous = group
else:
group = previous
final_grouping = min(group, max(left, max(min_width, 1)))
n_zeros = max(0, final_grouping - left)
n_chars = max(0, min(left, final_grouping))
ts = self._loc_thousands if need_separator else None
self._fill_digits(buf, digits, left, n_chars, n_zeros, ts)
need_separator = True
left -= n_chars
min_width -= final_grouping
if left <= 0 and min_width <= 0:
done = True
break
min_width -= n_ts
if not done:
group = max(max(left, min_width), 1)
n_zeros = max(0, group - left)
n_chars = max(0, min(left, group))
ts = self._loc_thousands if need_separator else None
self._fill_digits(buf, digits, left, n_chars, n_zeros, ts)
buf.reverse()
self._grouped_digits = self.empty.join(buf)
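# Hedged illustration (not in the original source): with the default
# non-locale settings from _get_locale() -- thousands separator "," and
# grouping "\3\0" -- _group_digits() turns the digit string "1234567" into
# "1,234,567"; if zero-padding forces min_width to 12, the result grows to
# "0,001,234,567" so that no group is left dangling.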
def _upcase_string(self, s):
buf = []
for c in s:
index = ord(c)
if ord("a") <= index <= ord("z"):
c = chr(index - 32)
buf.append(c)
return self.empty.join(buf)
def _fill_number(self, spec, num, to_digits, to_prefix, fill_char,
to_remainder, upper, grouped_digits=None):
out = self._builder()
if spec.n_lpadding:
out.append_multiple_char(fill_char[0], spec.n_lpadding)
if spec.n_sign:
sign = spec.sign
out.append(sign)
if spec.n_prefix:
pref = num[to_prefix:to_prefix + spec.n_prefix]
if upper:
pref = self._upcase_string(pref)
out.append(pref)
if spec.n_spadding:
out.append_multiple_char(fill_char[0], spec.n_spadding)
if spec.n_digits != 0:
if self._loc_thousands:
if grouped_digits is not None:
digits = grouped_digits
else:
digits = self._grouped_digits
assert digits is not None
else:
stop = to_digits + spec.n_digits
assert stop >= 0
digits = num[to_digits:stop]
if upper:
digits = self._upcase_string(digits)
out.append(digits)
if spec.n_decimal:
out.append(".")
if spec.n_remainder:
out.append(num[to_remainder:])
if spec.n_rpadding:
out.append_multiple_char(fill_char[0], spec.n_rpadding)
# if complex, need to call twice - just return the buffer
return out.build()
def _format_int_or_long(self, w_num, kind):
if self._precision != -1:
msg = "precision not allowed in integer type"
raise ValueError(msg)
sign_char = "\0"
tp = self._type
if tp == "c":
if self._sign != "\0":
msg = "sign not allowed with 'c' presentation type"
raise ValueError(msg)
value = w_num
result = chr(value)
n_digits = 1
n_remainder = 1
to_remainder = 0
n_prefix = 0
to_prefix = 0
to_numeric = 0
else:
if tp == "b":
base = 2
skip_leading = 2
elif tp == "o":
base = 8
skip_leading = 2
elif tp == "x" or tp == "X":
base = 16
skip_leading = 2
elif tp == "n" or tp == "d":
base = 10
skip_leading = 0
else:
raise AssertionError("shouldn't reach")
if kind == INT_KIND:
result = self._int_to_base(base, w_num)
else:
result = self._int_to_base(base, w_num)
n_prefix = skip_leading if self._alternate else 0
to_prefix = 0
if result[0] == "-":
sign_char = "-"
skip_leading += 1
to_prefix += 1
n_digits = len(result) - skip_leading
n_remainder = 0
to_remainder = 0
to_numeric = skip_leading
self._get_locale(tp)
spec = self._calc_num_width(n_prefix, sign_char, to_numeric, n_digits,
n_remainder, False, result)
fill = self._lit(" ") if self._fill_char == "\0" else self._fill_char
upper = self._type == "X"
return self._fill_number(spec, result, to_numeric,
to_prefix, fill, to_remainder, upper)
def _int_to_base(self, base, value):
if base == 10:
return str(value)
# This part is slow.
negative = value < 0
value = abs(value)
buf = ["\0"] * (8 * 8 + 6) # Too much on 32 bit, but who cares?
i = len(buf) - 1
while True:
div = value // base
mod = value - div * base
digit = abs(mod)
digit += ord("0") if digit < 10 else ord("a") - 10
buf[i] = chr(digit)
value = div
i -= 1
if not value:
break
if base == 2:
buf[i] = "b"
buf[i - 1] = "0"
elif base == 8:
buf[i] = "o"
buf[i - 1] = "0"
elif base == 16:
buf[i] = "x"
buf[i - 1] = "0"
else:
buf[i] = "#"
buf[i - 1] = chr(ord("0") + base % 10)
if base > 10:
buf[i - 2] = chr(ord("0") + base // 10)
i -= 1
i -= 1
if negative:
i -= 1
buf[i] = "-"
assert i >= 0
return self.empty.join(buf[i:])
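# Hedged examples (not in the original source) of _int_to_base() output, which
# always carries the CPython-style prefix for non-decimal bases:
#   _int_to_base(16, 255) -> "0xff"
#   _int_to_base(8, 9)    -> "0o11"
#   _int_to_base(2, -5)   -> "-0b101"
#   _int_to_base(10, 42)  -> "42"   (delegates to str())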
def format_int_or_long(self, w_num, kind):
if self._parse_spec("d", ">"):
return self.space.str(w_num)
tp = self._type
if (tp == "b" or
tp == "c" or
tp == "d" or
tp == "o" or
tp == "x" or
tp == "X" or
tp == "n"):
return self._format_int_or_long(w_num, kind)
elif (tp == "e" or
tp == "E" or
tp == "f" or
tp == "F" or
tp == "g" or
tp == "G" or
tp == "%"):
w_float =
u"contributes. This assumes failure "
u"times are exponentially "
u"distributed."))
self.cmbAssembly.set_tooltip_text(_(u"Selects and displays the "
u"assembly associated with the "
u"data set."))
self.cmbDistribution.set_tooltip_text(_(u"Selects and displays the "
u"statistical distribution "
u"used to fit the data."))
self.cmbFitMethod.set_tooltip_text(_(u"Selects and displays the "
u"method used to fit the data to "
u"the selected distribution."))
self.cmbConfType.set_tooltip_text(_(u"Selects and displays the type "
u"of confidence bounds."))
self.cmbConfMethod.set_tooltip_text(_(u"Selects and displays the "
u"method for developing "
u"confidence bounds."))
self.txtDescription.set_tooltip_text(_(u"Description of the selected "
u"data set."))
self.txtConfidence.set_tooltip_text(_(u"Desired statistical "
u"confidence"))
self.txtStartTime.set_tooltip_text(_(u"Earliest failure time to use "
u"for calculating reliability "
u"metrics."))
self.txtEndTime.set_tooltip_text(_(u"Latest failure time to use for "
u"calculating reliability "
u"metrics."))
self.txtRelPoints.set_tooltip_text(_(u"Number of points at which to "
u"calculate reliability "
u"metrics."))
self.txtStartDate.set_tooltip_text(_(u"Earliest failure date to use "
u"for calculating reliability "
u"metrics."))
self.txtEndDate.set_tooltip_text(_(u"Latest failure date to use for "
u"calculating reliability "
u"metrics."))
# Connect gtk.Widget() signals to callback methods.
self.btnStartDate.connect('button-release-event',
Widgets.date_select, self.txtStartDate)
self.btnEndDate.connect('button-release-event', Widgets.date_select,
self.txtEndDate)
self._lst_handler_id.append(
self.cmbAssembly.connect('changed', self._on_combo_changed, 0))
self._lst_handler_id.append(
self.cmbDistribution.connect('changed', self._on_combo_changed, 1))
self._lst_handler_id.append(
self.cmbConfType.connect('changed', self._on_combo_changed, 2))
self._lst_handler_id.append(
self.cmbConfMethod.connect('changed', self._on_combo_changed, 3))
self._lst_handler_id.append(
self.cmbFitMethod.connect('changed', self._on_combo_changed, 4))
self._lst_handler_id.append(
self.txtDescription.connect('focus-out-event',
self._on_focus_out, 5))
self._lst_handler_id.append(
self.txtConfidence.connect('focus-out-event',
self._on_focus_out, 6))
self._lst_handler_id.append(
self.txtStartTime.connect('focus-out-event',
self._on_focus_out, 7))
self._lst_handler_id.append(
self.txtEndTime.connect('focus-out-event',
self._on_focus_out, 8))
self._lst_handler_id.append(
self.txtRelPoints.connect('focus-out-event',
self._on_focus_out, 9))
self._lst_handler_id.append(
self.txtStartDate.connect('focus-out-event',
self._on_focus_out, 10))
self.txtStartDate.connect('changed', self._on_focus_out, None, 10)
self._lst_handler_id.append(
self.txtEndDate.connect('focus-out-event', self._on_focus_out, 11))
self.txtEndDate.connect('changed', self._on_focus_out, None, 11)
# Put it all together.
_toolbar = self._create_toolbar()
self.pack_start(_toolbar, expand=False)
self._notebook = self._create_notebook()
self.pack_end(self._notebook)
self.show_all()
def _create_toolbar(self):
"""
Method to create the toolbar for the Survival class Work Book.
"""
_toolbar = gtk.Toolbar()
_position = 0
_button = gtk.ToolButton()
_image = gtk.Image()
_image.set_from_file(Configuration.ICON_DIR + '32x32/add.png')
_button.set_icon_widget(_image)
_button.connect('clicked', self._on_button_clicked, 0)
_button.set_tooltip_text(_(u"Add a new survival analysis to the open "
u"RTK Program database for the selected "
u"revision."))
_toolbar.insert(_button, _position)
_position += 1
_button = gtk.ToolButton()
_image = gtk.Image()
_image.set_from_file(Configuration.ICON_DIR + '32x32/remove.png')
_button.set_icon_widget(_image)
_button.connect('clicked', self._on_button_clicked, 1)
_button.set_tooltip_text(_(u"Remove the selected survival analysis "
u"from the open RTK Program database."))
_toolbar.insert(_button, _position)
_position += 1
# Calculate button.
_button = gtk.ToolButton()
_image = gtk.Image()
_image.set_from_file(Configuration.ICON_DIR + '32x32/calculate.png')
_button.set_icon_widget(_image)
_button.connect('clicked', self._on_button_clicked, 2)
_button.set_tooltip_text(_(u"Analyzes the selected survival "
u"analysis."))
_toolbar.insert(_button, _position)
_position += 1
# Save button.
_button = gtk.ToolButton()
_image = gtk.Image()
_image.set_from_file(Configuration.ICON_DIR + '32x32/save.png')
_button.set_icon_widget(_image)
_button.connect('clicked', self._on_button_clicked, 3)
_button.set_tooltip_text(_(u"Saves the selected survival analysis and "
u"it's records."))
_toolbar.insert(_button, _position)
_position += 1
# Save all button.
_button = gtk.ToolButton()
_image = gtk.Image()
_image.set_from_file(Configuration.ICON_DIR + '32x32/save-all.png')
_button.set_icon_widget(_image)
_button.connect('clicked', self._on_button_clicked, 4)
_button.set_tooltip_text(_(u"Saves all of the survival analyses and "
u"their records."))
_toolbar.insert(_button, _position)
_toolbar.show()
return _toolbar
def _create_notebook(self):
"""
Method to create the Survival class gtk.Notebook().
"""
_notebook = gtk.Notebook()
# Set the user's preferred gtk.Notebook tab position.
if Configuration.TABPOS[2] == 'left':
_notebook.set_tab_pos(gtk.POS_LEFT)
elif Configuration.TABPOS[2] == 'right':
_notebook.set_tab_pos(gtk.POS_RIGHT)
elif Configuration.TABPOS[2] == 'top':
_notebook.set_tab_pos(gtk.POS_TOP)
else:
_notebook.set_tab_pos(gtk.POS_BOTTOM)
self._create_analyses_input_page(_notebook)
for __, _dist in enumerate(self._lst_results):
_dist.create_results_page()
for __, _dist in enumerate(self._lst_plots):
_dist.create_plot_page()
return _notebook
def _create_analyses_input_page(self, notebook): # pylint: disable=R0914
"""
Method to create the Dataset class gtk.Notebook() page for displaying
assessment inputs for the selected data set.
:param gtk.Notebook notebook: the Dataset class gtk.Notebook() widget.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# Build-up the containers for the tab. #
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
_hbox = gtk.HPaned()
_fixed = gtk.Fixed()
_frame = Widgets.make_frame(label=_(u"Analysis Inputs"))
_frame.set_shadow_type(gtk.SHADOW_ETCHED_IN)
_frame.add(_fixed)
_hbox.pack1(_frame, True, True)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# Place the widgets used to display analysis input information. #
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# Load the gtk.ComboBox() widgets.
_results = [[u"MCF"], [u"Kaplan-Meier"], [_(u"NHPP - Power Law")],
[u"NHPP - Loglinear"], [_(u"Exponential")],
[_(u"Lognormal")], [_(u"Normal")], [u"Weibull"],
["WeiBayes"]]
Widgets.load_combo(self.cmbDistribution, _results)
_results = [[_(u"Lower One-Sided")], [_(u"Upper One-Sided")],
[_(u"Two-Sided")]]
Widgets.load_combo(self.cmbConfType, _results)
_results = [[_(u"Crow (NHPP Only)")], [_(u"Duane (NHPP Only)")],
[_(u"Fisher Matrix")], [_(u"Likelihood")],
[_(u"Bootstrap")]]
Widgets.load_combo(self.cmbConfMethod, _results)
_results = [["MLE"], [_(u"Regression")]]
Widgets.load_combo(self.cmbFitMethod, _results)
# Create the labels for the left half of the right side.
_labels = [_(u"Assembly:"), _(u"Description:"), _(u"Distribution:"),
_("Fit Method:"), _(u"Confidence:"), _(u"Confidence Type:"),
_("Confidence Method:")]
(_x_pos1, _y_pos1) = Widgets.make_labels(_labels, _fixed, 5, 5)
_x_pos1 += 55
# Create the labels for the right half of the right side.
_labels = [_(u"Start Time:"), _(u"End Time:"), _(u"Step Interval:"),
_(u"Start Date:"), _(u"End Date:")]
(_x_pos2,
_y_pos2) = Widgets.make_labels(_labels, _fixed, _x_pos1 + 215, 5)
_x_pos2 += _x_pos1
_x_pos2 += 275
# Place widgets on the left side.
_fixed.put(self.cmbAssembly, _x_pos1, _y_pos1[0])
_fixed.put(self.txtDescription, _x_pos1, _y_pos1[1])
_fixed.put(self.cmbDistribution, _x_pos1, _y_pos1[2])
_fixed.put(self.cmbFitMethod, _x_pos1, _y_pos1[3])
_fixed.put(self.txtConfidence, _x_pos1, _y_pos1[4])
_fixed.put(self.cmbConfType, _x_pos1, _y_pos1[5])
_fixed.put(self.cmbConfMethod, _x_pos1, _y_pos1[6])
# Place widgets on the right side.
_fixed.put(self.txtStartTime, _x_pos2, _y_pos2[0])
_fixed.put(self.txtEndTime, _x_pos2, _y_pos2[1])
_fixed.put(self.txtRelPoints, _x_pos2, _y_pos2[2])
_fixed.put(self.txtStartDate, _x_pos2, _y_pos2[3])
_fixed.put(self.btnStartDate, _x_pos2 + 105, _y_pos2[3])
_fixed.put(self.txtEndDate, _x_pos2, _y_pos2[4])
_fixed.put(self.btnEndDate, _x_pos2 + 105, _y_pos2[4])
_fixed.put(self.chkGroup, _x_pos2, _y_pos2[4] + 30)
_fixed.put(self.chkParts, _x_pos2, _y_pos2[4] + 60)
_fixed.show_all()
# Insert the tab.
_label = gtk.Label()
_label.set_markup("<span weight='bold'>" +
_(u"Analysis\nInputs") + "</span>")
_label.set_alignment(xalign=0.5, yalign=0.5)
_label.set_justify(gtk.JUSTIFY_CENTER)
_label.show_all()
_label.set_tooltip_text(_(u"Displays analysis inputs for the selected "
u"dataset."))
notebook.insert_page(_hbox, tab_label=_label, position=-1)
return False
def load(self, model):
"""
Method to load the Survival class gtk.Notebook().
:param model: the :py:class:`rtk.survival.Survival.Model` whose
attributes will be loaded into the display widgets.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
self._model = model
self._load_analysis_inputs_page()
# Remove existing results and plots pages.
if self._obj_results is not None:
self._notebook.remove_page(1)
if self._obj_plots is not None:
self._notebook.remove_page(1)
# Get the correct results and plots object for the selected s-model.
self._obj_results = self._lst_results[self._model.distribution_id - 1]
self._obj_plots = self._lst_plots[self._model.distribution_id - 1]
# Insert the s-model results and plots pages.
self._notebook.insert_page(self._obj_results,
tab_label=self._obj_results.lblPage,
position=1)
self._notebook.insert_page(self._obj_plots,
tab_label=self._obj_plots.lblPage,
position=2)
# Load the s-model results and plots pages.
self._obj_results.load_results_page(self._model)
self._obj_plots.load_plots(self._model)
self._notebook.show_all()
self._notebook.set_current_page(0)
return False
def _load_analysis_inputs_page(self):
"""
Method to load the gtk.Widgets() on the analysis inputs page.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
# Load the gtk.ComboBox() with system hardware names.
self.cmbAssembly.handler_block(self._lst_handler_id[0])
Widgets.load_combo(self.cmbAssembly, Configuration.RTK_HARDWARE_LIST,
simple=False)
self.cmbAssembly.handler_unblock(self._lst_handler_id[0])
self.cmbAssembly.set_active(self._model.assembly_id)
self.cmbDistribution.set_active(self._model.distribution_id)
self.cmbConfType.set_active(self._model.confidence_type)
self.cmbConfMethod.set_active(self._model.confidence_method)
self.cmbFitMethod.set_active(self._model.fit_method)
self.txtDescription.set_text(self._model.description)
if self._model.confidence < 1.0:
_confidence = self._model.confidence * 100.0
else:
_confidence = self._model.confidence
self.txtConfidence.set_text(str(_confidence))
self.txtStartTime.set_text(str(self._model.start_time))
self.txtEndTime.set_text(str(self._model.rel_time))
self.txtRelPoints.set_text(str(self._model.n_rel_points))
_start_date = Utilities.ordinal_to_date(self._model.start_date)
_end_date = Utilities.ordinal_to_date(self._model.end_date)
self.txtStartDate.set_text(str(_start_date))
self.txtEndDate.set_text(str(_end_date))
return False
def update(self):
"""
Updates the Work Book widgets with changes to the Survival data model
attributes. Called by other views when the Survival data model
attributes are edited via their gtk.Widgets().
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
self.cmbAssembly.handler_block(self._lst_handler_id[0])
self.cmbAssembly.set_active(self._model.assembly_id)
self.cmbAssembly.handler_unblock(self._lst_handler_id[0])
self.cmbDistribution.handler_block(self._lst_handler_id[1])
self.cmbDistribution.set_active(self._model.distribution_id)
self.cmbDistribution.handler_unblock(self._lst_handler_id[1])
self.cmbConfType.handler_block(self._lst_handler_id[2])
self.cmbConfType.set_active(self._model.confidence_type)
self.cmbConfType.handler_unblock(self._lst_handler_id[2])
self.cmbConfMethod.handler_block(self._lst_handler_id[3])
self.cmbConfMethod.set_active(self._model.confidence_method)
self.cmbConfMethod.handler_unblock(self._lst_handler_id[3])
self.cmbFitMethod.handler_block(self._lst_handler_id[4])
self.cmbFitMethod.set_active(self._model.fit_method)
self.cmbFitMethod.handler_unblock(self._lst_handler_id[4])
self.txtDescription.handler_block(self._lst_handler_id[5])
self.txtDescription.set_text(self._model.description)
self.txtDescription.handler_unblock(self._lst_handler_id[5])
self.txtConfidence.handler_block(self._lst_handler_id[6])
if self._model.confidence < 1.0:
_confidence = self._model.confidence * 100.0
else:
_confidence = self._model.confidence
self.txtConfidence.set_text(str(_confidence))
self.txtConfidence.handler_unblock(self._lst_handler_id[6])
self.txtStartTime.handler_block(self._lst_handler_id[7])
self.txtStartTime.set_text(str(self._model.start_time))
self.txtStartTime.handler_unblock(self._lst_handler_id[7])
self.txtEndTime.handler_block(self._lst_handler_id[8])
self.txtEndTime.set_text(str(self._model.rel_time))
self.txtEndTime.handler_unblock(self._lst_handler_id[8])
self.txtRelPoints.handler_block(self._lst_handler_id[9])
self.txtRelPoints.set_text(str(self._model.n_rel_points))
self.txtRelPoints.handler_unblock(self._lst_handler_id[9])
self.txtStartDate.handler_block(self._lst_handler_id[10])
_start_date = Utilities.ordinal_to_date(self._model.start_date)
self.txtStartDate.set_text(str(_start_date))
self.txtStartDate.handler_unblock(self._lst_handler_id[10])
self.txtEndDate.handler_block(self._lst_handler_id[11])
_end_date = Utilities.ordinal_to_date(self._model.end_date)
self.txtEndDate.set_text(str(_end_date))
self.txtEndDate.handler_unblock(self._lst_handler_id[11])
return False
def _on_button_clicked(self, __button, index):
"""
Method to respond to gtk.Button() 'clicked' signals and call the
correct function or method, passing any parameters as needed.
:param gtk.Button button: the gtk.Button() that called this method.
:param int index: the index in the handler ID list of the callback
signal associated with the gtk.Button() that called
this method.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
if index == 0:
self._mdcRTK.dtcSurvival.add_survival(self._model.revision_id)
self._modulebook.request_load_data()
elif index == 1:
self._mdcRTK.dtcSurvival.delete_survival(self._model.survival_id)
self._modulebook.request_load_data()
elif index == 2:
_survival_id = self._model.survival_id
if self._mdcRTK.dtcSurvival.request_calculate(_survival_id):
Widgets.rtk_error(_(u"Error calculating survival analysis."))
else:
self.load(self._model)
elif index == 3:
self._mdcRTK.dtcSurvival.save_survival(self._model.survival_id)
elif index == 4:
self._mdcRTK.dtcSurvival.save_all_survivals()
return False
def _on_combo_changed(self, combo, index):
"""
Method to respond to gtk.ComboBox() 'changed' signals.
:param gtk.ComboBox combo: the gtk.ComboBox() that called this method.
:param int index: the index in the handler ID list of the callback
signal associated with the gtk.ComboBox() that
called this method.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
combo.handler_block(self._lst_handler_id[index])
if index == 0: # Assembly ID
self._model.assembly_id = combo.get_active()
try:
_new_text = Configuration.RTK_HARDWARE_LIST[self._model.assembly_id][0]
except IndexError:
_new_text = ''
self._modulebook.update(1, str(_new_text))
elif index == 1: # Statistical distribution
self._model.distribution_id = combo.get_active()
self._modulebook.update(4, self._model.distribution_id)
elif index == 2: # Confidence type
self._model.confidence_type = combo.get_active()
self._modulebook.update(6, self._model.confidence_type)
elif index == 3: # Confidence method
self._model.confidence_method = combo.get_active()
self._modulebook.update(7, self._model.confidence_method)
elif index ==
# src/romt/toolchain.py
#!/usr/bin/env python3
# coding=utf-8
import argparse
from pathlib import Path
import re
import shutil
from typing import (
List,
Set,
Tuple,
)
from romt import base
from romt import common
from romt import dist
from romt import error
from romt import integrity
from romt import signature
from romt.manifest import Manifest, Package
description = """\
Mirror and manage toolchain artifacts.
"""
epilog = """\
SPEC is one of:
<channel>
<channel>-YYYY-MM-DD
<channel>-latest
<channel>-*
*-YYYY-MM-DD
*-latest
*-*
YYYY-MM-DD
latest
*
Where:
- <channel> is one of: nightly, beta, stable, X.Y.Z
- Single ``*`` represents date, not channel; equivalent to ``*-*``.
- Multiple SPEC options may be given.
- Each SPEC option will be split at commas and whitespace.
TARGET is standard 3- or 4-tuple; common examples:
- x86_64-unknown-linux-gnu (alias ``linux``)
- x86_64-pc-windows-msvc (alias ``windows``)
- x86_64-apple-darwin (alias ``darwin``)
COMMAND values:
Typical:
download download artifacts matching SPEC and TARGET to DEST
pack pack DEST artifacts matching SPEC and TARGET into ARCHIVE
unpack unpack ARCHIVE into DEST, setting SPEC and TARGET
Less common:
fetch-manifest download manifest matching SPEC and TARGET to DEST
verify verify DEST artifacts matching SPEC and TARGET
list print DEST artifacts and targets matching SPEC
all-targets print all known targets mentioned in SPEC
fixup publish manifest variants for SPEC, updating undated
When multiple COMMANDs are given, they share all option values.
For complete details, try ``romt --readme`` to view README.rst.
"""
def parse_spec(spec: str) -> Tuple[str, str]:
"""parse spec into (date, channel).
Forms with channel:
<channel>
<channel>-YYYY-MM-DD
<channel>-latest
<channel>-*
*-YYYY-MM-DD
*-latest
*-*
Forms with only date:
YYYY-MM-DD
latest
*
Where:
- <channel> is one of: nightly, beta, stable, X.Y.Z
- Single ``*`` represents date, not channel; equivalent to ``*-*``.
"""
# Single "*" is treated as wildcard for date, not channel.
if spec == "*":
return "*", "*"
channel_rex = r"""
(?P<channel>
nightly | beta | stable | \* | (?: \d+\.\d+\.\d+ )
)
"""
date_rex = r"""
(?P<date>
\d\d\d\d-\d\d-\d\d | latest | \*
)
"""
m = re.match(
r"{} (?: - {})? $".format(channel_rex, date_rex), spec, re.VERBOSE
)
if m:
channel = m.group("channel")
date = m.group("date") or ""
return date, channel
m = re.match(r"{} $".format(date_rex), spec, re.VERBOSE)
if m:
date = m.group("date")
return date, "*"
raise error.UsageError("invalid SPEC {}".format(repr(spec)))
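# Hedged examples (not part of the original module) of parse_spec() results,
# following the grammar in its docstring; returned tuples are (date, channel):
#   parse_spec("nightly")           -> ("", "nightly")
#   parse_spec("stable-2020-01-30") -> ("2020-01-30", "stable")
#   parse_spec("1.41.0-latest")     -> ("latest", "1.41.0")
#   parse_spec("2020-01-30")        -> ("2020-01-30", "*")
#   parse_spec("*")                 -> ("*", "*")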
def channel_rel_path(date: str, channel: str) -> str:
channel_name = "channel-rust-{}.toml".format(channel)
if date:
rel_path = "{}/{}".format(date, channel_name)
else:
rel_path = "{}".format(channel_name)
return rel_path
def add_arguments(parser: argparse.ArgumentParser) -> None:
base.add_downloader_arguments(parser)
parser.add_argument(
"-s",
"--select",
action="append",
dest="specs",
metavar="SPEC",
default=[],
help="""one or more SPEC values for toolchain selection""",
)
parser.add_argument(
"-t",
"--target",
action="append",
dest="targets",
default=[],
help="""target to download (default varies by COMMAND)""",
)
parser.add_argument(
"--dest",
action="store",
default="dist",
help="""local download directory (default: %(default)s)""",
)
parser.add_argument(
"--url",
action="store",
default="https://static.rust-lang.org/dist",
help="""base URL of dist (default: %(default)s)""",
)
parser.add_argument(
"--archive",
action="store",
metavar="ARCHIVE",
default="toolchain.tar.gz",
help="use archive ARCHIVE for pack/unpack (default: %(default)s)",
)
parser.add_argument(
"--warn-signature",
action="store_true",
default=False,
help="warn (instead of fail) on signature verification failure",
)
parser.add_argument(
"--no-signature",
action="store_true",
default=False,
help="disable all uses of signature files (*.asc); mainly for testing",
)
parser.add_argument(
"commands",
nargs="*",
metavar="COMMAND",
help="""commands to execute in the order given""",
)
class Main(dist.DistMain):
def __init__(self, args: argparse.Namespace) -> None:
super().__init__(args)
self.downloader.set_warn_signature(args.warn_signature)
self._with_sig = not args.no_signature
def manifest_url_path(self, date: str, channel: str) -> Tuple[str, Path]:
rel_path = channel_rel_path(date, channel)
manifest_url = self.url_from_rel_path(rel_path)
manifest_path = self.dest_path_from_rel_path(rel_path)
return manifest_url, manifest_path
def manifest_path(self, date: str, channel: str) -> Path:
_, man_path = self.manifest_url_path(date, channel)
return man_path
def get_manifest(
self, date: str, channel: str, *, download: bool
) -> Manifest:
man_url, man_path = self.manifest_url_path(date, channel)
if download:
if date:
# Dated manifests may always be cached.
cached = True
else:
# Undated manifests should always be re-downloaded. They
# might be newer than what's on disk (either because of the
# passage of time or because a fixup operation might overwrite
# the undated manifest with old information before a download).
cached = False
self.downloader.download_verify(
man_url, man_path, cached=cached, with_sig=self._with_sig
)
else:
self.downloader.verify(man_path, with_sig=self._with_sig)
return Manifest.from_toml_path(man_path)
def select_manifest(
self, spec: str, *, download: bool, canonical: bool = False
) -> Manifest:
date, channel = parse_spec(spec)
manifest = self.get_manifest(date, channel, download=download)
if canonical and (
manifest.date != date or manifest.channel != channel
):
manifest = self.get_manifest(
manifest.date, manifest.channel, download=download
)
return manifest
def channels_in_dest_date(self, date: str) -> List[str]:
date_path = self.dest_path / date
prefix = "channel-rust-"
suffix = ".toml"
channels = [
p.name[len(prefix) : -len(suffix)]
for p in date_path.glob(prefix + "*" + suffix)
]
return channels
def adjust_download_specs(self, specs: List[str]) -> List[str]:
# For downloads, require explicit date and channel.
for spec in specs:
date, channel = parse_spec(spec)
if "*" in (date, channel) or date == "latest":
raise error.UsageError("invalid wild SPEC: {}".format(spec))
return dist.require_specs(specs)
def expand_wild_spec(self, spec: str) -> List[str]:
specs = [] # type: List[str]
date, channel = parse_spec(spec)
if "*" in (date, channel) or date == "latest":
if channel == "*":
channel_patterns = set("nightly beta stable".split())
else:
channel_patterns = set([channel])
if date in ("*", "latest"):
dates = common.reversed_date_dir_names(self.dest_path)
else:
dates = [date]
for d in dates:
channels = channel_patterns.intersection(
self.channels_in_dest_date(d)
)
specs.extend(
"{}-{}".format(channel, d) for channel in channels
)
if date == "latest" and specs:
break
else:
specs.append(spec)
if not specs:
raise error.UsageError(
"no matches for wild SPEC {}".format(repr(spec))
)
return specs
def adjust_wild_specs(self, specs: List[str]) -> List[str]:
# For non-downloads, handle wild specs.
adjusted_specs = []
for spec in specs:
adjusted_specs.extend(self.expand_wild_spec(spec))
return dist.require_specs(adjusted_specs)
def downloaded_packages(self, manifest: Manifest) -> List[Package]:
packages = []
for package_info in manifest.gen_available_packages():
dest_path = self.dest_path_from_rel_path(package_info.rel_path)
if dest_path.is_file():
packages.append(package_info)
return packages
def downloaded_targets(self, manifest: Manifest) -> List[str]:
targets = set(p.target for p in self.downloaded_packages(manifest))
targets.discard("*")
return sorted(targets)
def adjust_targets(
self, manifest: Manifest, base_targets: List[str]
) -> List[str]:
possible_targets = set(p.target for p in manifest.gen_packages())
targets = set()
for target in base_targets:
if target == "all":
targets.update(possible_targets)
elif target == "*":
targets.update(self.downloaded_targets(manifest))
elif target not in possible_targets:
raise error.UsageError(
"target {} not found in manifest".format(repr(target))
)
else:
targets.add(target)
return sorted(targets)
def cmd_fetch_manifest(self) -> None:
for spec in self.adjust_download_specs(self.specs):
common.iprint("Fetch manifest: {}".format(spec))
manifest = self.select_manifest(spec, download=True)
common.iprint(" ident: {}".format(manifest.ident))
def _download_verify(
self, download: bool, specs: List[str], base_targets: List[str]
) -> None:
for spec in specs:
common.iprint(
"{}: {}".format("Download" if download else "Verify", spec)
)
manifest = self.select_manifest(
spec, download=download, canonical=True
)
common.iprint(" ident: {}".format(manifest.ident))
targets = self.adjust_targets(manifest, base_targets)
packages = list(manifest.gen_available_packages(targets=targets))
common.iprint(
" packages: {}, targets: {}".format(
len(packages), len(targets)
)
)
for t in targets:
common.vvprint(" target: {}".format(t))
for package in packages:
rel_path = package.rel_path
dest_path = self.dest_path_from_rel_path(rel_path)
dest_url = self.url_from_rel_path(rel_path)
if download:
self.downloader.download_verify(
dest_url,
dest_path,
assume_ok=self.args.assume_ok,
with_sig=self._with_sig,
)
else:
self.downloader.verify(dest_path, with_sig=self._with_sig)
def cmd_download(self) -> None:
specs = self.adjust_download_specs(self.specs)
base_targets = dist.require_targets(self.targets)
self._download_verify(
download=True, specs=specs, base_targets=base_targets
)
def cmd_verify(self) -> None:
specs = self.adjust_wild_specs(self.specs)
base_targets = dist.require_targets(self.targets, default="*")
self._download_verify(
download=False, specs=specs, base_targets=base_targets
)
def cmd_list(self) -> None:
max_verbosity = common.get_max_verbosity()
show_details = max_verbosity >= common.VERBOSITY_INFO
for spec in self.adjust_wild_specs(self.specs):
common.vprint("List: {}".format(spec))
manifest = self.select_manifest(spec, download=False)
if show_details:
available_packages = manifest.available_packages()
available_targets = manifest.available_targets()
packages = self.downloaded_packages(manifest)
targets = self.downloaded_targets(manifest)
target_out = "targets[{}/{}]".format(
len(targets), len(available_targets),
)
package_out = "packages[{}/{}]".format(
len(packages), len(available_packages),
)
# Example output:
# stable-2020-01-30(1.41.0) \
# targets[84/84], packages[272/326]
common.iprint(
"{:28} {:16} {:18}".format(
manifest.ident, target_out, package_out
)
)
for target in targets:
common.iprint(" {}".format(target))
else:
common.eprint(manifest.ident)
def cmd_all_targets(self) -> None:
for spec in self.adjust_wild_specs(self.specs):
common.iprint("All targets: {}".format(spec))
manifest = self.select_manifest(spec, download=False)
common.iprint(" ident: {}".format(manifest.ident))
for target in manifest.all_targets():
common.eprint(target)
def cmd_pack(self) -> None:
base_targets = dist.require_targets(self.targets, default="*")
archive_path = self.get_archive_path()
common.iprint("Packing archive: {}".format(archive_path))
with common.tar_context(archive_path, "w") as tar_f:
def pack_path(rel_path: str) -> None:
dest_path = self.dest_path_from_rel_path(rel_path)
packed_name = "dist/" + rel_path
common.vprint("[pack] {}".format(rel_path))
try:
tar_f.add(str(dest_path), packed_name)
except FileNotFoundError:
raise error.MissingFileError(str(dest_path))
def pack_rel_path(rel_path: str) -> None:
pack_path(rel_path)
pack_path(integrity.append_hash_suffix(rel_path))
if self._with_sig:
pack_path(signature.append_sig_suffix(rel_path))
for spec in self.adjust_wild_specs(self.specs):
common.iprint("Pack: {}".format(spec))
manifest = self.select_manifest(
spec, download=False, canonical=True
)
common.iprint(" ident: {}".format(manifest.ident))
targets = self.adjust_targets(manifest, base_targets)
packages = list(
manifest.gen_available_packages(targets=targets)
)
common.iprint(
" packages: {}, targets: {}".format(
len(packages), len(targets)
)
)
for t in targets:
common.vvprint(" target: {}".format(t))
# Pack channel file.
pack_rel_path(
channel_rel_path(manifest.date, manifest.channel)
)
# Pack up package file parts.
for package in packages:
pack_rel_path(package.rel_path)
def _detect_specs(self, rel_paths: Set[str]) -> List[str]:
specs = []
for rel_path in rel_paths:
m = re.match(
r"""
(?P<date>\d\d\d\d-\d\d-\d\d)
/channel-rust-
IoT hub applies to messages to be delivered to built-in and custom endpoints. See: https://aka.ms/telemetryoneventgrid
:param pulumi.Input['FallbackRoutePropertiesArgs'] fallback_route: The properties of the route that is used as a fall-back route when none of the conditions specified in the 'routes' section are met. This is an optional parameter. When this property is not set, the messages which do not meet any of the conditions specified in the 'routes' section get routed to the built-in eventhub endpoint.
:param pulumi.Input[Sequence[pulumi.Input['RoutePropertiesArgs']]] routes: The list of user-provided routing rules that the IoT hub uses to route messages to built-in and custom endpoints. A maximum of 100 routing rules are allowed for paid hubs and a maximum of 5 routing rules are allowed for free hubs.
"""
if endpoints is not None:
pulumi.set(__self__, "endpoints", endpoints)
if enrichments is not None:
pulumi.set(__self__, "enrichments", enrichments)
if fallback_route is not None:
pulumi.set(__self__, "fallback_route", fallback_route)
if routes is not None:
pulumi.set(__self__, "routes", routes)
@property
@pulumi.getter
def endpoints(self) -> Optional[pulumi.Input['RoutingEndpointsArgs']]:
"""
The properties related to the custom endpoints to which your IoT hub routes messages based on the routing rules. A maximum of 10 custom endpoints are allowed across all endpoint types for paid hubs and only 1 custom endpoint is allowed across all endpoint types for free hubs.
"""
return pulumi.get(self, "endpoints")
@endpoints.setter
def endpoints(self, value: Optional[pulumi.Input['RoutingEndpointsArgs']]):
pulumi.set(self, "endpoints", value)
@property
@pulumi.getter
def enrichments(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EnrichmentPropertiesArgs']]]]:
"""
The list of user-provided enrichments that the IoT hub applies to messages to be delivered to built-in and custom endpoints. See: https://aka.ms/telemetryoneventgrid
"""
return pulumi.get(self, "enrichments")
@enrichments.setter
def enrichments(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EnrichmentPropertiesArgs']]]]):
pulumi.set(self, "enrichments", value)
@property
@pulumi.getter(name="fallbackRoute")
def fallback_route(self) -> Optional[pulumi.Input['FallbackRoutePropertiesArgs']]:
"""
The properties of the route that is used as a fall-back route when none of the conditions specified in the 'routes' section are met. This is an optional parameter. When this property is not set, the messages which do not meet any of the conditions specified in the 'routes' section get routed to the built-in eventhub endpoint.
"""
return pulumi.get(self, "fallback_route")
@fallback_route.setter
def fallback_route(self, value: Optional[pulumi.Input['FallbackRoutePropertiesArgs']]):
pulumi.set(self, "fallback_route", value)
@property
@pulumi.getter
def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RoutePropertiesArgs']]]]:
"""
The list of user-provided routing rules that the IoT hub uses to route messages to built-in and custom endpoints. A maximum of 100 routing rules are allowed for paid hubs and a maximum of 5 routing rules are allowed for free hubs.
"""
return pulumi.get(self, "routes")
@routes.setter
def routes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RoutePropertiesArgs']]]]):
pulumi.set(self, "routes", value)
@pulumi.input_type
class RoutingServiceBusQueueEndpointPropertiesArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
authentication_type: Optional[pulumi.Input[Union[str, 'AuthenticationType']]] = None,
connection_string: Optional[pulumi.Input[str]] = None,
endpoint_uri: Optional[pulumi.Input[str]] = None,
entity_path: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input['ManagedIdentityArgs']] = None,
resource_group: Optional[pulumi.Input[str]] = None,
subscription_id: Optional[pulumi.Input[str]] = None):
"""
The properties related to service bus queue endpoint types.
:param pulumi.Input[str] name: The name that identifies this endpoint. The name can only include alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64 characters. The following names are reserved: events, fileNotifications, $default. Endpoint names must be unique across endpoint types. The name need not be the same as the actual queue name.
:param pulumi.Input[Union[str, 'AuthenticationType']] authentication_type: Method used to authenticate against the service bus queue endpoint
:param pulumi.Input[str] connection_string: The connection string of the service bus queue endpoint.
:param pulumi.Input[str] endpoint_uri: The url of the service bus queue endpoint. It must include the protocol sb://
:param pulumi.Input[str] entity_path: Queue name on the service bus namespace
:param pulumi.Input[str] id: Id of the service bus queue endpoint
:param pulumi.Input['ManagedIdentityArgs'] identity: Managed identity properties of routing service bus queue endpoint.
:param pulumi.Input[str] resource_group: The name of the resource group of the service bus queue endpoint.
:param pulumi.Input[str] subscription_id: The subscription identifier of the service bus queue endpoint.
"""
pulumi.set(__self__, "name", name)
if authentication_type is not None:
pulumi.set(__self__, "authentication_type", authentication_type)
if connection_string is not None:
pulumi.set(__self__, "connection_string", connection_string)
if endpoint_uri is not None:
pulumi.set(__self__, "endpoint_uri", endpoint_uri)
if entity_path is not None:
pulumi.set(__self__, "entity_path", entity_path)
if id is not None:
pulumi.set(__self__, "id", id)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if resource_group is not None:
pulumi.set(__self__, "resource_group", resource_group)
if subscription_id is not None:
pulumi.set(__self__, "subscription_id", subscription_id)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name that identifies this endpoint. The name can only include alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64 characters. The following names are reserved: events, fileNotifications, $default. Endpoint names must be unique across endpoint types. The name need not be the same as the actual queue name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="authenticationType")
def authentication_type(self) -> Optional[pulumi.Input[Union[str, 'AuthenticationType']]]:
"""
Method used to authenticate against the service bus queue endpoint
"""
return pulumi.get(self, "authentication_type")
@authentication_type.setter
def authentication_type(self, value: Optional[pulumi.Input[Union[str, 'AuthenticationType']]]):
pulumi.set(self, "authentication_type", value)
@property
@pulumi.getter(name="connectionString")
def connection_string(self) -> Optional[pulumi.Input[str]]:
"""
The connection string of the service bus queue endpoint.
"""
return pulumi.get(self, "connection_string")
@connection_string.setter
def connection_string(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "connection_string", value)
@property
@pulumi.getter(name="endpointUri")
def endpoint_uri(self) -> Optional[pulumi.Input[str]]:
"""
The url of the service bus queue endpoint. It must include the protocol sb://
"""
return pulumi.get(self, "endpoint_uri")
@endpoint_uri.setter
def endpoint_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "endpoint_uri", value)
@property
@pulumi.getter(name="entityPath")
def entity_path(self) -> Optional[pulumi.Input[str]]:
"""
Queue name on the service bus namespace
"""
return pulumi.get(self, "entity_path")
@entity_path.setter
def entity_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "entity_path", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Id of the service bus queue endpoint
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['ManagedIdentityArgs']]:
"""
Managed identity properties of routing service bus queue endpoint.
"""
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['ManagedIdentityArgs']]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter(name="resourceGroup")
def resource_group(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource group of the service bus queue endpoint.
"""
return pulumi.get(self, "resource_group")
@resource_group.setter
def resource_group(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group", value)
@property
@pulumi.getter(name="subscriptionId")
def subscription_id(self) -> Optional[pulumi.Input[str]]:
"""
The subscription identifier of the service bus queue endpoint.
"""
return pulumi.get(self, "subscription_id")
@subscription_id.setter
def subscription_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "subscription_id", value)
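# Illustrative usage sketch (all values below are placeholders, not from the SDK docs):
#   queue_endpoint = RoutingServiceBusQueueEndpointPropertiesArgs(
#       name="my-sbq-endpoint",
#       connection_string="Endpoint=sb://example.servicebus.windows.net/;...",
#       resource_group="my-resource-group",
#       subscription_id="00000000-0000-0000-0000-000000000000",
#   )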
@pulumi.input_type
class RoutingServiceBusTopicEndpointPropertiesArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
authentication_type: Optional[pulumi.Input[Union[str, 'AuthenticationType']]] = None,
connection_string: Optional[pulumi.Input[str]] = None,
endpoint_uri: Optional[pulumi.Input[str]] = None,
entity_path: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input['ManagedIdentityArgs']] = None,
resource_group: Optional[pulumi.Input[str]] = None,
subscription_id: Optional[pulumi.Input[str]] = None):
"""
The properties related to service bus topic endpoint types.
:param pulumi.Input[str] name: The name that identifies this endpoint. The name can only include alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64 characters. The following names are reserved: events, fileNotifications, $default. Endpoint names must be unique across endpoint types. The name need not be the same as the actual topic name.
:param pulumi.Input[Union[str, 'AuthenticationType']] authentication_type: Method used to authenticate against the service bus topic endpoint
:param pulumi.Input[str] connection_string: The connection string of the service bus topic endpoint.
:param pulumi.Input[str] endpoint_uri: The url of the service bus topic endpoint. It must include the protocol sb://
:param pulumi.Input[str] entity_path: Queue name on the service bus topic
:param pulumi.Input[str] id: Id of the service bus topic endpoint
:param pulumi.Input['ManagedIdentityArgs'] identity: Managed identity properties of routing service bus topic endpoint.
:param pulumi.Input[str] resource_group: The name of the resource group of the service bus topic endpoint.
:param pulumi.Input[str] subscription_id: The subscription identifier of the service bus topic endpoint.
"""
pulumi.set(__self__, "name", name)
if authentication_type is not None:
pulumi.set(__self__, "authentication_type", authentication_type)
if connection_string is not None:
pulumi.set(__self__, "connection_string", connection_string)
if endpoint_uri is not None:
pulumi.set(__self__, "endpoint_uri", endpoint_uri)
if entity_path is not None:
pulumi.set(__self__, "entity_path", entity_path)
if id is not None:
pulumi.set(__self__, "id", id)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if resource_group is not None:
pulumi.set(__self__, "resource_group", resource_group)
if subscription_id is not None:
pulumi.set(__self__, "subscription_id", subscription_id)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
        The name that identifies this endpoint. The name can only include alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64 characters. The
return [xxh(value).hexdigest() for value in values]
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
return [l[i:i + n] for i in range(0, len(l), n)]
def sos_pair_input(value):
    '''Input must be a list of two lists,
    ordered such that the length of the 2nd list
    is always a multiple of the length of the 1st.
    The way the lists are combined is:
    ABCD              ABCDABCD
    ABCDEFGH -------> ABCDEFGH ------> ABCDABCDABCDEFGH
    Input can also be a flat list of length 2N, in which case
    the first N elements are paired with the last N.
    '''
if len(value) == 2 and isinstance(value[0], (list, tuple)):
# Input is a pair of vectors
multiplier = len(value[1]) / len(value[0])
if multiplier > int(multiplier):
# is not integer
raise ValueError(
                'Length of the 2nd list must be a multiple of the 1st.')
else:
multiplier = int(multiplier)
value[0] = flatten_list([value[0] for y in range(multiplier)])
else:
if not len(value):
return []
if isinstance(value[0], (list, tuple)):
raise ValueError(
"Input must be a pair of vectors or flat vectors!")
else:
# cut by half, by default
if len(value) % 2:
raise ValueError("Invalid input to pair!")
else:
value = list(zip(*[x for x in chunks(value, 2)]))
return flatten_list(value)
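# Worked example of the pairing rule above (hypothetical values, not from the
# original module):
#   sos_pair_input([['A', 'B'], ['C', 'D', 'E', 'F']])
#   -> ['A', 'B', 'A', 'B', 'C', 'D', 'E', 'F']   (1st list recycled to match the 2nd)
#   sos_pair_input(['A', 'B', 'C', 'D'])
#   -> ['A', 'C', 'B', 'D']                       (flat input: first half paired with second half)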
def sos_group_input_safe(value):
    '''
    Input is a list of lists or tuples. Lists are ordered such that
    the length of the next list is always a multiple of the previous one.
    ABCD              ABCDABCD
    ABCDEFGH -------> ABCDEFGH ------> AABBCCDDAEBFCGDH
    '''
for idx in reversed(range(1, len(value))):
if not isinstance(value[idx], (list, tuple)):
raise ValueError('Input elements must be list or tuples')
multiplier = len(value[idx]) / len(value[idx - 1])
if multiplier > int(multiplier):
# is not integer
raise ValueError(
                'Length of the next list must be a multiple of the previous.')
else:
multiplier = int(multiplier)
if multiplier > 1:
value[idx - 1] = flatten_list(
[value[idx - 1] for i in range(multiplier)])
return flatten_list(list(zip(*value)))
def sos_group_input_adam(*lsts):
'''
https://stackoverflow.com/questions/48346169/fast-zip-list-of-lists-while-completing-shorter-lists-by-cycling
'''
n = len(lsts) - 1
cyclic = [
lst if i == n else itertools.cycle(lst) for i, lst in enumerate(lsts)
]
return list(itertools.chain.from_iterable(zip(*cyclic)))
def sos_group_input(*lsts):
'''
https://stackoverflow.com/questions/48346169/fast-zip-list-of-lists-while-completing-shorter-lists-by-cycling
'''
lsts = sorted(lsts, key=len)
return list(
chain(*islice(zip(*(cycle(l) for l in lsts)), 0, len(lsts[-1]))))
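# Worked example matching the docstring diagram (hypothetical values):
#   sos_group_input(list('ABCD'), list('ABCDEFGH')) -> list('AABBCCDDAEBFCGDH')
# i.e. shorter lists are cycled against the longest one.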
def round_print(text, sep, pc=None):
if pc is None:
print(text)
return
for line in text.split('\n'):
line = line.rstrip().split(sep)
for i, value in enumerate(line):
try:
line[i] = int(value)
except Exception:
try:
line[i] = float(value)
except Exception:
line[i] = value
print(
sep.join([('{0:.' + str(pc) +
'E}').format(x) if isinstance(x, float) else str(x)
for x in line]).strip())
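# Example of the re-formatting above (assumed call, not from the original tests):
#   round_print("3.14159 2.71828", " ", pc=2) prints "3.14E+00 2.72E+00";
#   with pc=None the text is echoed unchanged.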
def install_package(lib, libtype, dryrun=False, autoinstall=False):
    groups = re.search(r'(.*?)\((.*?)\)', lib)
if groups is not None:
lib = groups.group(1).strip()
versions = [x.strip() for x in groups.group(2).split(',')]
        # Only allow one version specification: previous versions of DSC supported multiple
        # versions, but that proved more confusing than helpful, so it is now restricted to one.
version = versions[0]
else:
version = ""
if version and not (version.startswith('=') or version.startswith(">") or
version.startswith("<") or version.startswith("!=")):
version = f"=={version}"
if not dryrun:
logger.info(
f"{'Checking / installing' if autoinstall else 'Checking'} {libtype.replace('_', ' ')} {lib}{(' version' + version) if version else ''} ..."
)
if libtype == 'R_library':
from sos.targets_r import R_library
return R_library(f"{lib}{version}",
autoinstall=autoinstall).target_exists()
else:
from sos.targets_python import Py_Module
return Py_Module(f"{lib}{version}",
autoinstall=autoinstall).target_exists()
else:
return (lib, version)
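# Sketch of the version parsing above (hypothetical package spec): in dry-run mode
#   install_package("dplyr (1.0.7)", "R_library", dryrun=True) -> ("dplyr", "==1.0.7")
# i.e. a bare version number is normalised to an '==' constraint.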
def install_package_interactive(lib, libtype, required=True):
am = AnswerMachine()
if libtype == 'R_library':
from sos.targets_r import R_library as target_check
else:
from sos.targets_python import Py_Module as target_check
ret = target_check(lib).target_exists()
if not ret:
if am.get(
f"{libtype.replace('_', ' ')} {lib.split('@')[0]} needs to be installed or updated. Would you like to proceed (requires internet connection)"
):
ret = target_check(lib, autoinstall=True).target_exists()
if not ret and required:
raise ModuleNotFoundError(
f"Required {libtype.replace('_', ' ')} ``{lib.split('@')[0]}`` is not available or obsolete. Please install it and try again."
)
def make_html_name(value):
return "".join(x for x in value.replace(' ', '-')
if x.isalnum() or x in ['-', '_']).lower()
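# Example (hypothetical input): make_html_name("My Section 1!") -> "my-section-1"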
def yaml2html(content, to_file, title=''):
if os.path.isfile(content):
content = open(content).read()
if not os.path.splitext(to_file)[1] == '.html':
to_file += '.html'
with open(to_file, 'w') as f:
f.write('<!DOCTYPE html><html><head><title>DSC - {}</title>\n'.format(
title))
f.write('<style type="text/css">\n')
f.write(HTML_CSS)
f.write('\n</style>\n<script type="text/javascript">\n')
f.write(HTML_JS)
f.write('</script></head><body>{}<pre><code class='\
'"language-yaml; line-numbers; left-trim; right-trim;">\n'.\
format('<h3>{}:</h3>'.format(os.path.basename(title)) if title else ''))
f.write(content)
f.write('\n</code></pre></body></html>')
def transcript2html(content, to_file, title=''):
if os.path.isfile(content):
content = open(content).readlines()
if not os.path.splitext(to_file)[1] == '.html':
to_file += '.html'
with open(to_file, 'w') as f:
f.write('<!DOCTYPE html><html><head><title>DSC - {}</title>\n'.format(
title))
f.write('<style type="text/css">\n')
f.write(HTML_CSS)
f.write('\n</style>\n<script type="text/javascript">\n')
f.write(HTML_JS)
f.write('</script></head><body>')
idx = 1
for line in content:
if not re.match(r'^\s', line):
continue
if line.strip().startswith(
'##') and "script UUID:" in line and len(
line.strip().split()) == 5:
if idx > 1:
f.write('\n</code></pre>\n')
lan = line.split()[1]
f.write('{0} script {1}<pre><code class='\
'"language-{2}; line-numbers; left-trim; right-trim;">\n'.\
format(lan.capitalize(), idx, lan.lower()))
idx += 1
f.write(line[4:])
if idx > 1:
f.write('\n</code></pre>')
f.write('</body></html>')
def md2html(content, to_file):
import pypandoc
if os.path.isfile(content):
content = open(content).read()
if not os.path.splitext(to_file)[1] == '.html':
to_file += '.html'
output = pypandoc.convert_text(content, 'html', format='md')
with open(to_file, 'w') as f:
f.write(output)
def dsc2html(dsc_conf,
output,
sequences,
modules,
lib_content=None,
summary_table=None):
'''
section_content: ordered dictionary of lists,
{'section 1': ['exec1.R', 'exec2.py']}
'''
lib_content = lib_content or []
modules = dict(modules)
section_content = [('->'.join(x), [(i, modules[i]) for i in x])
for x in sequences]
section_content = dict(section_content + lib_content)
languages = {
'py': 'python',
'sh': 'bash',
'rb': 'ruby',
'r': 'r',
'm': 'matlab',
'pl': 'perl'
}
if not os.path.splitext(output)[1] == '.html':
output += '.html'
with open(output, 'w') as f:
# header and style/scripts
f.write('<!DOCTYPE html><html><head><title>DSC - {}</title>\n'.format(
os.path.basename(output)[:-5]))
f.write('<style type="text/css">\n')
f.write(HTML_CSS)
f.write('\n</style>\n<script type="text/javascript">\n')
f.write(HTML_JS)
# DSC script file
f.write('</script></head><body><h2>DSC <a class="various" href="#dsc_conf">configuration script</a>{}</h2>\n'.\
format('' if summary_table is None else ' and module summary'))
f.write(
'<div style="display:none"><div id="dsc_conf"><pre><code class="language-yaml; '
'line-numbers; left-trim; right-trim;">\n')
f.write(dsc_conf)
f.write('\n</code></pre></div></div>\n')
if summary_table is not None:
f.write('<hr>'.join(summary_table))
f.write('<h2>DSC pipelines</h2>\n<div class="accordion">\n')
# DSC sections with executable scripts
for name, contents in section_content.items():
# get section scripts
scripts = []
seen = []
for content in contents:
try:
module, command = content
except Exception:
command = content
module = ''
if isinstance(command, str):
# for libs
if command in seen:
continue
else:
seen.append(command)
try:
text = open(command).read()
except Exception:
continue
scripts.append(
(module + ' (' + os.path.basename(command) + ')',
os.path.splitext(command)[1][1:].lower(), text))
else:
# for exec is dict
text = (command['header'] + '\n' + command['content']) \
if len(command['header'] + command['content']) else ' '.join(command['path'])
if command['args']:
text = f"# Command arguments: {command['args']}\n" + text
cmd_files = '+'.join(command['file'])
scripts.append(
(module + ' (' +
(cmd_files
or languages[command['type'].lower()].capitalize()) +
' ' + command['signature'] + ')',
command['type'].lower(), text))
if len(scripts) == 0:
continue
f.write('<div class="accodion-section">\n'
'<a class="accordion-section-title" href="#{1}">{0}</a>\n'
'<div id={1} class="accordion-section-content">\n'.format(
name, make_html_name(name)))
f.write('<div class="tabs">\n<ul class="tab-links">\n')
for idx, script in enumerate(scripts):
f.write('<li{2}><a href="#{0}">{1}</a></li>\n'.\
format(make_html_name(name + '_' + script[0]), script[0],
' class="active"' if idx == 0 else ''))
f.write('</ul>\n<div class="tab-content">\n')
for idx, script in enumerate(scripts):
f.write('<div id="{0}" class="tab{1}">\n'.\
format(make_html_name(name + '_' + script[0]), ' active' if idx == 0 else ''))
f.write('<pre><code class="{}line-numbers; left-trim; right-trim;">\n'.\
format(("language-" + languages[script[1]] + "; ") if script[1] in languages else ''))
f.write(script[2])
f.write('\n</code></pre></div>\n')
f.write('</div></div></div></div>\n')
f.write('\n</div></body></html>')
def workflow2html(output, *multi_workflows):
with open(output, 'w') as f:
# header and style/scripts
f.write('<!DOCTYPE html><html><head><title>DSC - {}</title>\n'.format(
os.path.basename(output)[:-5]))
f.write('<style type="text/css">\n')
f.write(HTML_CSS)
f.write('\n</style>\n<script type="text/javascript">\n')
f.write(HTML_JS)
# DSC script file
f.write('</script></head><body>\n')
f.write('<div class="accordion">\n')
for j, workflow_content in enumerate(multi_workflows):
for i, modules in enumerate(workflow_content):
if i > 0:
f.write('\n<hr>\n')
f.write('<div class="accodion-section">\n'
'<a class="accordion-section-title" href="#{1}">{0}</a>\n'
'<div id={1} class="accordion-section-content">\n'.\
format('->'.join(modules.keys()), make_html_name('_'.join(modules.keys()) + f'_{j+1}')))
f.write('<div class="tabs">\n<ul class="tab-links">\n')
idx = 0
for key, module in modules.items():
name = module.name if hasattr(module,
'name') else 'DSC_' + key
f.write('<li{2}><a href="#{0}">{1}</a></li>\n'.\
format(make_html_name(name + f'_{j+1}_{i+1}'),
name,
' class="active"' if idx == 0 else ''))
idx += 1
f.write('</ul>\n<div class="tab-content">\n')
idx = 0
for key, module in modules.items():
name = module.name if hasattr(module,
'name') else 'DSC_' + key
f.write('<div id="{0}" class="tab{1}">\n'.\
format(make_html_name(name + f'_{j+1}_{i+1}'),
' active' if idx == 0 else ''))
f.write('<pre><code class="{}line-numbers; left-trim; right-trim;">\n'.\
format("language-yaml; "))
f.write(
str(module) if not isinstance(module, list) else '\n'.
join(['- ' + str(x) for x in module]))
f.write('\n</code></pre></div>\n')
idx += 1
f.write('</div></div></div></div>\n')
if j + 1 != len(multi_workflows):
f.write('<hr size="8">\n')
f.write('\n</div></body></html>')
def locate_file(file_name, file_path):
'''Use file_path information to try to complete the path of file'''
    if file_path is None
décevrai", "tu décevras", "il décevra", "elle décevra",
"nous décevrons", "vous décevrez", "ils décevront", "elles décevront",),
"découvrir": ("je découvrirai", "tu découvriras", "il découvrira", "elle découvrira",
"nous découvrirons", "vous découvrirez", "ils découvriront", "elles découvriront"),
"déduire": ("je déduirai", "tu déduiras", "il déduira", "elle déduira",
"nous déduirons", "vous déduirez", "ils déduiront", "elles déduiront",),
"descendre": ("je descendrai", "tu descendras", "il descendra", "elle descendra",
"nous descendrons", "vous descendrez", "ils descendront", "elles descendront",),
"détendre": ("je détendrai", "tu détendras", "il détendra", "elle détendra",
"nous détendrons", "vous détendrez", "ils détendront", "elles détendront",),
"devenir": ("je deviendrai", "tu deviendras", "il deviendra", "elle deviendra",
"nous deviendrons", "vous deviendrez", "ils deviendront", "elles deviendront",),
"devoir": ("je devrai", "tu devras", "il devra", "elle devra",
"nous devrons", "vous devrez", "ils devront", "elles devront",),
"dire": ("je dirai", "tu diras", "il dira", "elle dira",
"nous dirons", "vous direz", "ils diront", "elles diront",),
"disparaître": ("je disparaîtrai", "tu disparaîtras",
"il disparaîtra", "elle disparaîtra",
"nous disparaîtrons", "vous disparaîtrez",
"ils disparaîtront", "elles disparaîtront",),
"dormir": ("je dormirai", "tu dormiras", "il dormira", "elle dormira",
"nous dormirons", "vous dormirez", "ils dormiront", "elles dormiront",),
"écrire": ("j'écrirai", "tu écriras", "il écrira", "elle écrira",
"nous écrirons", "vous écrirez", "ils écriront", "elles écriront",),
"enduire": ("j'enduirai", "tu enduiras", "il enduira", "elle enduira",
"nous enduirons", "vous enduirez", "ils enduiront", "elles enduiront",),
"entendre": ("j'entendrai", "tu entendras", "il entendra", "elle entendra",
"nous entendrons", "vous entendrez", "ils entendront", "elles entendront",),
"être": ("je serai", "tu seras", "il sera", "elle sera",
"nous serons", "vous serez", "ils seront", "elles seront",),
"faire": ("je ferai", "tu feras", "il fera", "elle fera",
"nous ferons", "vous ferez", "ils feront", "elles feront",),
"falloir": ("", "", "il faudra", "", "", "", "", ""),
"frire": ("je frirai", "tu friras", "il frira", "", "", "", "", ""),
"s'inscrire": ("je m'inscrirai", "tu t'inscriras",
"il s'inscrira", "elle s'inscrira",
"nous nous inscrirons", "vous vous inscrirez",
"ils s'inscriront", "elles s'inscriront",),
"inscrire": ("j'inscrirai", "tu inscriras", "il inscrira", "elle inscrira",
"nous inscrirons", "vous inscrirez", "ils inscriront", "elles inscriront",),
"joindre": ("je joindrai", "tu joindras", "il joindra", "elle joindra",
"nous joindrons", "vous joindrez", "ils joindront", "elles joindront",),
"lire": ("je lirai", "tu liras", "il lira", "elle lira",
"nous lirons", "vous lirez", "ils liront", "elles liront",),
"mettre": ("je mettrai", "tu mettras", "il mettra", "elle mettra",
"nous mettrons", "vous mettrez", "ils mettront", "elles mettront",),
"mourir": ("je mourrai", "tu mourras", "il mourra", "elle mourra",
"nous mourrons", "vous mourrez", "ils mourront", "elles mourront",),
"naître": ("je naîtrai", "tu naîtras", "il naîtra", "elle naîtra",
"nous naîtrons", "vous naîtrez", "ils naîtront", "elles naîtront",),
"obtenir": ("j'obtiendrai", "tu obtiendras", "il obtiendra", "elle obtiendra",
"nous obtiendrons", "vous obtiendrez", "ils obtiendront", "elles obtiendront",),
"offrir": ("j'offrirai", "tu offriras", "il offrira", "elle offrira",
"nous offrirons", "vous offrirez", "ils offriront", "elles offriront",),
"ouvrir": ("j'ouvrirai", "tu ouvriras", "il ouvrira", "elle ouvrira",
"nous ouvrirons", "vous ouvrirez", "ils ouvriront", "elles ouvriront",),
"partir": ("je partirai", "tu partiras", "il partira", "elle partira",
"nous partirons", "vous partirez", "ils partiront", "elles partiront",),
"peindre": ("je peindrai", "tu peindras", "il peindra", "elle peindra",
"nous peindrons", "vous peindrez", "ils peindront", "elles peindront",),
"pendre": ("je pendrai", "tu pendras", "il pendra", "elle pendra",
"nous pendrons", "vous pendrez", "ils pendront", "elles pendront",),
"percevoir": ("je percevrai", "tu percevras", "il percevra", "elle percevra",
"nous percevrons", "vous percevrez", "ils percevront", "elles percevront",),
"perdre": ("je perdrai", "tu perdras", "il perdra", "elle perdra",
"nous perdrons", "vous perdrez", "ils perdront", "elles perdront",),
"permettre": ("je permettrai", "tu permettras", "il permettra", "elle permettra",
"nous permettrons", "vous permettrez", "ils permettront", "elles permettront",),
"pleuvoir": ("", "", "il pleuvra", "", "", "", "", ""),
"pourvoir": ("je pourvoirai", "tu pourvoiras", "il pourvoira", "elle pourvoira",
"nous pourvoirons", "vous pourvoirez", "ils pourvoiront", "elles pourvoiront",),
"pouvoir": ("je pourrai", "tu pourras", "il pourra", "elle pourra",
"nous pourrons", "vous pourrez", "ils pourront", "elles pourront",),
"prendre": ("je prendrai", "tu prendras", "il prendra", "elle prendra",
"nous prendrons", "vous prendrez", "ils prendront", "elles prendront",),
"prescrire": ("je prescrirai", "tu prescriras", "il prescrira", "elle prescrira",
"nous prescrirons", "vous prescrirez", "ils prescriront", "elles prescriront",),
"prétendre": ("je prétendrai", "tu prétendras", "il prétendra", "elle prétendra",
"nous prétendrons", "vous prétendrez", "ils prétendront", "elles prétendront",),
"produire": ("je produirai", "tu produiras", "il produira", "elle produira",
"nous produirons", "vous produirez", "ils produiront", "elles produiront",),
"recevoir": ("je recevrai", "tu recevras", "il recevra", "elle recevra",
"nous recevrons", "vous recevrez", "ils recevront", "elles recevront",),
"réduire": ("je réduirai", "tu réduiras", "il réduira", "elle réduira",
"nous réduirons", "vous réduirez", "ils réduiront", "elles réduiront",),
"rejoindre": ("je rejoindrai", "tu rejoindras", "il rejoindra", "elle rejoindra",
"nous rejoindrons", "vous rejoindrez", "ils rejoindront", "elles rejoindront",),
"rendre": ("je rendrai", "tu rendras", "il rendra", "elle rendra",
"nous rendrons", "vous rendrez", "ils rendront", "elles rendront",),
"résoudre": ("je résoudrai", "tu résoudras", "il résoudra", "elle résoudra",
"nous résoudrons", "vous résoudrez", "ils résoudront", "elles résoudront",),
"rire": ("je rirai", "tu riras", "il rira", "elle rira",
"nous rirons", "vous rirez", "ils riront", "elles riront",),
"savoir": ("je saurai", "tu sauras", "il saura", "elle saura",
"nous saurons", "vous saurez", "ils sauront", "elles sauront",),
"sentir": ("je sentirai", "tu sentiras", "il sentira", "elle sentira",
"nous sentirons", "vous sentirez", "ils sentiront", "elles sentiront",),
"servir": ("je servirai", "tu serviras", "il servira", "elle servira",
"nous servirons", "vous servirez", "ils serviront", "elles serviront",),
"sortir": ("je sortirai", "tu sortiras", "il sortira", "elle sortira",
"nous sortirons", "vous sortirez", "ils sortiront", "elles sortiront",),
"souffrir": ("je souffrirai", "tu souffriras", "il souffrira", "elle souffrira",
"nous souffrirons", "vous souffrirez", "ils souffriront", "elles souffriront",),
"suffire": ("je suffirai", "tu suffiras", "il suffira", "elle suffira",
"nous suffirons", "vous suffirez", "ils suffiront", "elles suffiront",),
"suivre": ("je suivrai", "tu suivras", "il suivra", "elle suivra",
"nous suivrons", "vous suivrez", "ils suivront", "elles suivront",),
"tendre": ("je tendrai", "tu tendras", "il tendra", "elle tendra",
"nous tendrons", "vous tendrez", "ils tendront", "elles tendront",),
"tenir": ("je tiendrai", "tu tiendras", "il tiendra", "elle tiendra",
"nous tiendrons", "vous tiendrez", "ils tiendront", "elles tiendront",),
"tondre": ("je tondrai", "tu tondras", "il tondra", "elle tondra",
"nous tondrons", "vous tondrez", "ils tondront", "elles tondront",),
"tordre": ("je tordrai", "tu tordras", "il tordra", "elle tordra",
"nous tordrons", "vous tordrez", "ils tordront", "elles tordront",),
"traduire": ("je traduirai", "tu traduiras", "il traduira", "elle traduira",
"nous traduirons", "vous traduirez", "ils traduiront", "elles traduiront",),
"valoir": ("je vaudrai", "tu vaudras", "il vaudra", "elle vaudra",
"nous vaudrons", "vous vaudrez", "ils vaudront", "elles vaudront",),
"vendre": ("je vendrai", "tu vendras", "il vendra", "elle vendra",
"nous vendrons", "vous vendrez", "ils vendront", "elles vendront",),
"venir": ("je viendrai", "tu viendras", "il viendra", "elle viendra",
"nous viendrons", "vous viendrez", "ils viendront", "elles viendront",),
"vivre": ("je vivrai", "tu vivras", "il vivra", "elle vivra",
"nous vivrons", "vous vivrez", "ils vivront", "elles vivront",),
"voir": ("je verrai", "tu verras", "il verra", "elle verra",
"nous verrons", "vous verrez", "ils verront", "elles verront",),
"vouloir": ("je voudrai", "tu voudras", "il voudra", "elle voudra",
"nous voudrons", "vous voudrez", "ils voudront", "elles voudront",),
}
for root, ans_tuple in answer.items():
for i, ans in enumerate(ans_tuple):
test_ans = conjug(root, "futur", "indicatif", i)
self.assertEqual(ans, test_ans)
test_ans_tuple = conjug_all(root, "futur", "indicatif")
self.assertTupleEqual(ans_tuple, test_ans_tuple)
def test_present_conditionnel(self):
answer = {
"aimer": ("j'aimerais", "tu aimerais", "il aimerait", "elle aimerait",
"nous aimerions", "vous aimeriez", "ils aimeraient", "elles aimeraient"),
"manger": ("je mangerais", "tu mangerais", "il mangerait", "elle mangerait",
"nous mangerions", "vous mangeriez", "ils mangeraient", "elles mangeraient"),
"choisir": ("je choisirais", "tu choisirais", "il choisirait", "elle choisirait",
"nous choisirions", "vous choisiriez", "ils choisiraient", "elles choisiraient"),
"convaincre": ("je convaincrais", "tu convaincrais", "il convaincrait", "elle convaincrait",
"nous convaincrions", "vous convaincriez", "ils convaincraient", "elles convaincraient"),
"appuyer": ("j'appuierais","tu appuierais","il appuierait","elle appuierait",
"nous appuierions", "vous appuieriez", "ils appuieraient", "elles appuieraient"),
"aller": ("j'irais", "tu irais", "il irait", "elle irait",
"nous irions", "vous iriez", "ils iraient", "elles iraient"),
"avoir": ("j'aurais", "tu aurais", "il aurait", "elle aurait",
"nous aurions", "vous auriez", "ils auraient", "elles auraient"),
"être": ("je serais", "tu serais", "il serait", "elle serait",
"nous serions", "vous seriez", "ils seraient", "elles seraient",),
"faire": ("je ferais", "tu ferais", "il ferait", "elle ferait",
"nous ferions", "vous feriez", "ils feraient", "elles feraient",),
"falloir": ("", "", "il faudrait", "", "", "", "", ""),
"frire": ("je frirais", "tu frirais", "il frirait", "", "", "", "", ""),
"pleuvoir": ("", "", "il pleuvrait", "", "", "", "", ""),
"pouvoir": ("je pourrais", "tu pourrais", "il pourrait", "elle pourrait",
# Repository: deepcrime-tool/DeepCrime
import ast
from copy import deepcopy
from utils import mutation_utils as mu
from utils.logger_setup import setup_logger
import utils.properties as props
logger = setup_logger(__name__)
class Mutation():
applyOnce = True
mutationName = None
def get_model_params_td(self, elem):
"""Extract a dict of params such as x_train, y_train
needed for mutation from a given node
Keyword arguments:
elem -- part of ast node
Returns: dict (params)
"""
params = {}
        # check for the params in element arguments or keywords
if hasattr(elem.value, 'args') and len(elem.value.args) > 0:
# params["x_train"] = elem.value.args[0].id
# params["y_train"] = elem.value.args[1].id
params["x_train"] = elem.value.args[0]
params["y_train"] = elem.value.args[1]
elif hasattr(elem.value, 'keywords') and len(elem.value.keywords) > 0:
for x in elem.value.keywords:
# print(x)
if x.arg == 'x':
# params["x_train"] = x.value.id
params["x_train"] = x.value
if x.arg == 'y':
# params["y_train"] = x.value.id
params["y_train"] = x.value
else:
logger.error("Mutation.get_model_params_td AST node does not have arguments or keywords")
return params
def get_model_params_hp(self, elem):
"""Extract a dict of params such as number of epochs, batch size, etc.
needed for mutation from a given node
Keyword arguments:
elem -- part of ast node
Returns: dict (params)
"""
params = {}
if hasattr(elem.value, 'keywords') and len(elem.value.keywords) > 0:
for k in elem.value.keywords:
if type(k.value) == ast.Name:
params[k.arg] = k.value.id
elif type(k.value) == ast.Num:
params[k.arg] = k.value.n
elif type(k.value) == ast.Str:
params[k.arg] = k.value.s
elif type(k.value) == ast.Constant:
params[k.arg] = k.value.value
elif type(k.value) == ast.Attribute:
params[k.arg] = 'attr'
else:
logger.error("Mutation.get_model_params_hp AST node does not have keywords")
return params
def add_keyword(self, elem, kwd_name, kwd_value):
try:
elem.value.keywords.append(ast.keyword(arg=kwd_name, value=ast.Name(id=kwd_value, ctx=ast.Load())))
except Exception:
logger.error("Mutation.add_keyword adding keyword to AST node failed")
#TODO: Check if we need this method
def get_mutation_params(self):
"""Extract a dict of params needed for mutation from a params file
Keyword arguments:
mutation_name -- name of the mutation
Returns: dics (params)
"""
params = {}
# params["mutation_name"] = mutation_name
# TODO: write the param extraction
        # FOR NOW it will be like this; later we will read it from the file given the mutation name
params["module_name"] = "dummy_operators"
params["operator_name"] = "dummy_operator"
return params
def mutate(self, file_path, save_path_mutated):
""" Apply mutation to the given model.
Keyword arguments:
file_path -- path to the py file with model
save_path_mutated -- path for the mutated model to be saved to
Returns: -
"""
# Parse the code for ast tree
try:
with open(file_path, "r") as source:
tree = ast.parse(source.read())
except Exception as e:
logger.error("Mutation.mutate parse AST tree failed" + str(e))
raise
try:
was_annotated = self.mutate_annotated(tree, save_path_mutated)
except Exception as e:
logger.error("Mutation.mutate_annotated failed:" + str(e))
try:
if not was_annotated:
self.mutate_automatically(tree, save_path_mutated)
except Exception as e:
logger.error("Mutation.mutate_automatically failed" + str(e))
def mutate_annotated(self, tree, save_path_mutated):
""" Apply mutation to the given model with annotated params.
Keyword arguments:
tree -- AST tree
save_path_mutated -- path for the mutated model to be saved to
Returns: -
"""
application_cnt = 0
model_params_ann = {}
was_annotated = False
mutation_params = getattr(props, self.mutationName)
for x in mutation_params["annotation_params"]:
model_params_ann[x] = None
# Look for the right place to insert the mutation
        # The commented-out code below would apply the same mutation multiple times (not needed at the moment)
for node in ast.walk(tree):
if hasattr(node, 'body') and isinstance(node.body, list):
for ind, x in enumerate(node.body):
# check for annotation
mu.check_for_annotation(x, model_params_ann)
                    # if all annotations are found, insert the mutation
if not None in model_params_ann.values() \
and len(model_params_ann) > 0:
# original_x = deepcopy(node.body[ind])
#
self.apply_mutation(node, x, ind+1, model_params_ann)
ast.fix_missing_locations(tree)
save_path_mutated_cnt = save_path_mutated + str(application_cnt) + '.py'
mu.unparse_tree(tree, save_path_mutated_cnt)
was_annotated = True
break
# if self.applyOnce:
# break
# else:
# application_cnt += 1
# node.body[ind] = original_x
if not None in model_params_ann.values():
break
return was_annotated
def mutate_automatically(self, tree, save_path_mutated):
""" Apply mutation to the given model (no annotations - automated)
Keyword arguments:
tree -- AST tree
save_path_mutated -- path for the mutated model to be saved to
Returns: -
"""
application_cnt = 0
# Look for the right place to insert the mutation
for node in ast.walk(tree):
if hasattr(node, 'body') and isinstance(node.body, list):
for ind, x in enumerate(node.body):
if self.is_target_node(x):
original_x = deepcopy(node.body[ind])
self.apply_mutation(node, x, ind)
ast.fix_missing_locations(tree)
save_path_mutated_cnt = save_path_mutated + str(application_cnt) + '.py'
mu.unparse_tree(tree, save_path_mutated_cnt)
if self.applyOnce:
break
else:
application_cnt += 1
node.body[ind] = original_x
def apply_mutation(self, node, elem, ind, model_params = None):
# Each class has its own implementation
return None
def is_target_node(self, elem):
# Each class has its own implementation
return None
#########################################
########### Training DATA ############
class ChangeLabelTDMut(Mutation):
mutationName = "change_label"
def dummy(self):
print("lalala")
def is_target_node(self, elem):
return mu.is_training_call(elem)
def get_model_params(self, elem):
return self.get_model_params_td(elem)
def get_mutation_params(self):
"""Extract a dict of params needed for mutation from a params file
Keyword arguments:
mutation_name -- name of the mutation
        Returns: dict (params)
"""
params = {}
# params["mutation_name"] = mutation_name
# TODO: write the param extraction
        # FOR NOW it will be like this; later we will read it from the file given the mutation name
label = None
percentage = -1
params["module_name"] = "training_data_operators"
params["operator_name"] = "operator_change_labels"
params["label"] = "properties.change_label['change_label_label']"
params["percentage"] = "properties.change_label['change_label_pct']"
return params
def generate_mutation_node(self, elem, model_params_ann = None):
"""Generate a mutation node
Keyword arguments:
mutation_name -- name of a mutation (str)
model_params -- params needed to build a mutation node. depend on the model (list)
Returns: ast node (mutation_node)
"""
# Get model specific params
if model_params_ann:
model_params = model_params_ann
model_params["y_train"] = ast.Name(id=model_params["y_train"], ctx=ast.Store())
else:
model_params = self.get_model_params(elem)
# Get mutation specific params
mutation_params = self.get_mutation_params()
# print(mutation_params)
mutation_node = ast.Assign(targets=[
# ast.Name(id=model_params["y_train"], ctx=ast.Store()),
model_params["y_train"],
],
value=ast.Call(
func=ast.Attribute(
value=ast.Name(id=mutation_params["module_name"], ctx=ast.Load()),
attr=mutation_params["operator_name"],
ctx=ast.Load()),
args=[
# ast.Name(id=model_params["y_train"], ctx=ast.Load()),
# ast.Str(s=mutation_params["label"]),
# ast.Num(n=mutation_params["percentage"]), ],
model_params["y_train"],
ast.Name(id=mutation_params["label"], ctx=ast.Load()),
ast.Name(id=mutation_params["percentage"], ctx=ast.Load()),],
keywords=[]))
return mutation_node
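    # When unparsed, the node built above becomes (roughly) the statement
    #   y_train = training_data_operators.operator_change_labels(
    #       y_train,
    #       properties.change_label['change_label_label'],
    #       properties.change_label['change_label_pct'])
    # where y_train is the annotated or auto-detected training-label variable.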
def insert_mutation(self, node, elem, ind, model_params_ann = None):
# generate a mutation call
mutation_node = self.generate_mutation_node(elem, model_params_ann)
# insert a mutation call
node.body.insert(ind, mutation_node)
is_inserted = True
return None
def apply_mutation(self, node, elem, ind, model_params_ann = None):
self.insert_mutation(node, elem, ind, model_params_ann)
class DeleteTDMut(Mutation):
mutationName = "delete_training_data"
def dummy(self):
print("lalala")
def is_target_node(self, elem):
return mu.is_training_call(elem)
def get_model_params(self, elem):
return self.get_model_params_td(elem)
def get_mutation_params(self):
"""Extract a dict of params needed for mutation from a params file
Keyword arguments:
mutation_name -- name of the mutation
        Returns: dict (params)
"""
params = {}
# params["mutation_name"] = mutation_name
# TODO: write the param extraction
        # FOR NOW it will be like this; later we will read it from the file given the mutation name
params["module_name"] = "training_data_operators"
params["operator_name"] = "operator_delete_training_data"
params["percentage"] = "properties.delete_training_data['delete_train_data_pct']"
return params
def generate_mutation_node(self, elem, model_params_ann = None):
"""Generate a mutation node
Keyword arguments:
mutation_name -- name of a mutation (str)
model_params -- params needed to build a mutation node. depend on the model (list)
Returns: ast node (mutation_node)
"""
# Get model specific params
if model_params_ann:
model_params = model_params_ann
model_params["x_train"] = ast.Name(id=model_params["x_train"], ctx=ast.Store())
model_params["y_train"] = ast.Name(id=model_params["y_train"], ctx=ast.Store())
else:
model_params = self.get_model_params(elem)
# Get mutation specific params
mutation_params = self.get_mutation_params()
mutation_node = ast.Assign(targets=[ast.Tuple(elts=[
# ast.Name(id=model_params["x_train"], ctx=ast.Store()),
# ast.Name(id=model_params["y_train"], ctx=ast.Store()),
model_params["x_train"],
model_params["y_train"],
], ctx=ast.Store()),
],
value=ast.Call(
func=ast.Attribute(value=ast.Name(id=mutation_params["module_name"], ctx=ast.Load()),
attr=mutation_params["operator_name"],
ctx=ast.Load()),
args=[
# ast.Name(id=model_params["x_train"], ctx=ast.Load()),
# ast.Name(id=model_params["y_train"], ctx=ast.Load()),
model_params["x_train"],
model_params["y_train"],
ast.Name(id=mutation_params["percentage"], ctx=ast.Load()), ],
keywords=[]))
return mutation_node
def insert_mutation(self, node, elem, ind, model_params_ann = None):
# generate a mutation call
mutation_node = self.generate_mutation_node(elem, model_params_ann)
# insert a mutation call
node.body.insert(ind, mutation_node)
is_inserted = True
return None
def apply_mutation(self, node, elem, ind, model_params_ann = None):
self.insert_mutation(node, elem, ind, model_params_ann)
class OutputClassesOverlapTDMUT(Mutation):
mutationName = "make_output_classes_overlap"
def dummy(self):
print("lalala")
def is_target_node(self, elem):
return mu.is_training_call(elem)
def get_model_params(self, elem):
return self.get_model_params_td(elem)
def get_mutation_params(self):
"""Extract a dict of params needed for mutation from a params file
Keyword arguments:
mutation_name -- name of the mutation
        Returns: dict (params)
"""
params = {}
# params["mutation_name"] = mutation_name
# TODO: write the param extraction
        # FOR NOW it will be like this; later we will read it from the file given the mutation name
params["module_name"] = "training_data_operators"
params["operator_name"] = "operator_make_output_classes_overlap"
params["percentage"] = "properties.make_output_classes_overlap['make_output_classes_overlap_pct']"
return params
def generate_mutation_node(self, elem, model_params_ann=None):
"""Generate a mutation node
Keyword arguments:
mutation_name -- name of a mutation (str)
model_params -- params needed to build a mutation node. depend on the model (list)
Returns: ast node (mutation_node)
| |
'''Perform optimizations on an AST.
The optimize_tree function is the main interface to the optimizer, which will
optimize an AST in place. The function can optimize at two different levels.
Level 1: Minimal optimization is performed.
Level 2: More extensive optimization is performed that may produce code that is
not equivalent to the original code.
The optimizer assumes the AST is both syntactically and semantically valid.
'''
import itertools
import tokens
import syntaxtree
class ConstantFolder(syntaxtree.TreeMutator):
'''Optimizer that performs constant folding within single expressions.'''
def __init__(self):
super(ConstantFolder, self).__init__()
self.visit_functions = {
syntaxtree.BinaryOp: self.visit_binary_op,
syntaxtree.UnaryOp: self.visit_unary_op,
}
def get_const(self, node):
'''Return the value of a constant AST node, or None if the node is not a number.'''
if isinstance(node, syntaxtree.Num):
if node.n == tokens.TRUE:
return 'True'
if node.n == tokens.FALSE:
return 'False'
return node.n
return None
def visit_binary_op(self, node):
# Fold children first so that we can fold parts of an expression even if
# the entire expression is not constant.
self.visit_children(node)
left = self.get_const(node.left)
if left is not None:
right = self.get_const(node.right)
if right is not None:
op = node.op
if node.node_type == tokens.BOOL:
if op == tokens.AND:
op = 'and'
elif op == tokens.OR:
op = 'or'
result = eval('%s %s %s' % (left, op, right))
if result is True:
return syntaxtree.Num('true')
if result is False:
return syntaxtree.Num('false')
return syntaxtree.Num(str(result))
return node
def visit_unary_op(self, node):
self.visit_children(node)
operand = self.get_const(node.operand)
if operand is not None:
op = node.op
if op == tokens.NOT and node.node_type == tokens.INT:
# 32-bit NOT
return syntaxtree.Num(str(eval('(~%s) & 0xffffffff' % operand)))
result = eval('%s %s' % (node.op, operand))
if result is True:
return syntaxtree.Num('true')
if result is False:
return syntaxtree.Num('false')
return syntaxtree.Num(str(result))
return node
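# Sketch of the folding behaviour above: a constant integer expression such as 2 + 3
# collapses to Num('5'); for BOOL-typed nodes the AND/OR tokens are mapped to Python's
# 'and'/'or' before evaluation, so a constant boolean expression folds to Num('true')
# or Num('false'). Non-constant operands are left untouched, so sub-expressions of a
# larger expression can still be folded.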
class ConstantPropagator(ConstantFolder):
'''Optimize that performs both constant folding and propagation.
This optimizer can detect some uses of uninitialized variables and will
optionally report them as errors.
This optimizer combines constant folding and propagation into a single pass
over the tree. By combining the two operations, we can save several walks
over the tree that would be necessary if the two were interleaved.
Instead of taking the time to construct explicit U-D chains for the
propagation, we can simply record the constant value of known variables in a
symbol table and invalidate that value if we reach a non-constant
assignment. Since we do the propagation inline with folding, and the walk is
in program order, this will produce correct code as long as we don't miss
any invalidations.'''
def __init__(self, print_errors=False):
super(ConstantFolder, self).__init__()
self.print_errors = print_errors
# Scopes store the value of variables known to be constant, and the
# declarations of procedures, so that we can tell which parameters are in/out
self.global_scope = {}
self.scopes = [{}]
self.visit_functions = {
syntaxtree.Program: self.visit_program,
syntaxtree.ProcDecl: self.visit_procdecl,
syntaxtree.Assign: self.visit_assign,
syntaxtree.BinaryOp: self.visit_binary_op,
syntaxtree.UnaryOp: self.visit_unary_op,
syntaxtree.If: self.visit_jump,
syntaxtree.For: self.visit_jump,
syntaxtree.Call: self.visit_call,
}
# Use a stack to keep track of whether we're in a loop or not since we
# can have nested blocks. If there is a value on this stack, all
# assignments will result in an invalidation. This is more conservative
# than necessary, but without more reaching definition analysis, we
# can't tell if an assignment is loop-invariant.
self.stop_propagation = []
def is_literal(self, node):
return isinstance(node, (syntaxtree.Num, syntaxtree.Str))
def enter_scope(self):
self.scopes.append({})
def leave_scope(self):
self.scopes.pop()
def define_variable(self, name, value, is_global=False):
if isinstance(name, syntaxtree.Subscript):
name = name.name
if is_global:
self.global_scope[name] = value
else:
self.scopes[-1][name] = value
def get_var(self, name):
if isinstance(name, syntaxtree.Name):
key = name
elif isinstance(name, syntaxtree.Subscript):
key = name.name
try:
return self.scopes[-1][key]
except KeyError:
try:
return self.global_scope[key]
except KeyError:
if self.print_errors:
msg = 'Uninitialized variable referenced'
if name.token:
underline = '^' if name.token.start == name.token.end else '~'
line = name.token.line.rstrip()
print ('Warning on line %s: %s\n'
' %s\n'
' %s') % (name.token.lineno, msg, line,
''.join((underline if name.token.start <= i <= name.token.end else ' ')
for i in xrange(len(line))))
else:
print msg
self.print_errors = False
return None
def get_const(self, node):
# This overrides ConstantFolder's method to return the propagated value
# of variables.
if isinstance(node, syntaxtree.Num):
return node.n
if isinstance(node, syntaxtree.Name):
value = self.get_var(node)
if isinstance(value, syntaxtree.Num):
return value.n
return None
def visit_program(self, node):
for decl in node.decls:
if isinstance(decl, syntaxtree.ProcDecl):
self.define_variable(decl.name, decl, decl.is_global)
self.visit_children(node)
return node
def visit_procdecl(self, node):
self.enter_scope()
# Add this declaration to its own scope to allow recursion.
self.define_variable(node.name, node)
# Add parameters to so they don't get flagged as uninitialized reference errors.
for param in node.params:
self.define_variable(param.var_decl.name, None)
for decl in node.decls:
if isinstance(decl, syntaxtree.ProcDecl):
self.define_variable(decl.name, decl)
self.visit_children(node)
self.leave_scope()
return node
def visit_assign(self, node):
# self.visit_children will fold the value if possible
if not self.stop_propagation:
self.visit_children(node)
# Don't propagate arrays, since that could take up too much memory.
#if isinstance(node.target, syntaxtree.Name):
if self.stop_propagation or isinstance(node.target, syntaxtree.Subscript):
# Unset any variables we find if we're in a loop or branch
self.define_variable(node.target, None)
else:
if self.is_literal(node.value):
self.define_variable(node.target, node.value)
# Expressions consisting of a single variable don't get picked
# up by the constant folder.
if isinstance(node.value, syntaxtree.Name):
const = self.get_const(node.value)
if const is not None:
return syntaxtree.Assign(node.target, syntaxtree.Num(const),
token=node.token)
return node
def visit_jump(self, node):
self.stop_propagation.append(True)
self.visit_children(node)
self.stop_propagation.pop()
return node
def visit_call(self, node):
decl = self.get_var(node.func)
for i, (param, arg) in enumerate(itertools.izip(decl.params, node.args)):
# Unset variables sent as out parameters
if param.direction == tokens.OUT:
self.define_variable(arg, None)
else:
# Propagate variables sent as in parameters
if isinstance(arg, syntaxtree.Name):
value = self.get_var(arg)
if value is not None:
node.args[i] = value
return node
class DeadCodeEliminator(syntaxtree.TreeMutator):
'''Optimizer that eliminates dead branches, loops, assignments, and declarations.
This optimizer does not construct explicit D-U chains or SSA structures.
Instead, it works by walking the AST in reverse program order, which is a
top-down, right-to-left walk of the AST, marking variables as used or
assigned as it encounters them. When it reaches an assignment or
declaration, it will have encountered all references to that identifier
already.
'''
ASSIGNED = 1
REFERENCED = 2
UNKNOWN = 3
def __init__(self):
super(DeadCodeEliminator, self).__init__()
# Scopes store whether variables have been read or assigned to
# For procedures, they store a tuple of (decl, read or assigned)
self.global_scope = {}
self.scopes = []
self.visit_functions = {
syntaxtree.ProcDecl: self.visit_block,
syntaxtree.Program: self.visit_block,
syntaxtree.Assign: self.visit_assign,
syntaxtree.Name: self.visit_name,
syntaxtree.Call: self.visit_call,
syntaxtree.If: self.visit_if,
syntaxtree.For: self.visit_for,
syntaxtree.VarDecl: self.visit_vardecl,
}
def enter_scope(self):
self.scopes.append({})
def leave_scope(self):
self.scopes.pop()
def define_var(self, name, value, is_global=False):
if is_global:
self.global_scope[name] = value
else:
self.scopes[-1][name] = value
def set_var(self, name, value):
if name in self.scopes[-1]:
            self.scopes[-1][name] = value
elif name in self.global_scope:
self.global_scope[name] = value
# It's ok if we try to set undefined variables: we never define
# procedure parameters since they can't be eliminated.
def get_var(self, name):
if isinstance(name, syntaxtree.Name):
key = name
elif isinstance(name, syntaxtree.Subscript):
key = name.name
try:
return self.scopes[-1][key]
except KeyError:
return self.global_scope.get(key)
def walk_body(self, node, attrname='body'):
# Manually walk the body in reverse to construct implicit D-U Chains.
new_body = []
for child in reversed(getattr(node, attrname)):
value = self.visit(child)
if value is not None:
if isinstance(value, list):
new_body = value + new_body
else:
new_body.insert(0, value)
setattr(node, attrname, new_body)
def visit_block(self, node):
# This function is used for both Program and ProcDecl nodes
if isinstance(node, syntaxtree.ProcDecl):
if self.get_var(node.name)[1] is None:
return None
self.enter_scope()
if isinstance(node, syntaxtree.ProcDecl):
self.define_var(node.name, (node, None))
# Mark out parameters as unknown. Since they can't be read from,
# they won't be marked referenced, but we don't want to eliminate
# their assignments.
for param in node.params:
if param.direction == tokens.OUT:
self.define_var(param.var_decl.name, self.UNKNOWN)
for decl in node.decls:
if isinstance(decl, syntaxtree.ProcDecl):
self.define_var(decl.name, (decl, None), decl.is_global)
else:
self.define_var(decl.name, None)
self.walk_body(node)
self.walk_body(node, 'decls')
# If there's a return in the body, it isn't in a branch, so | |
of Green Vegetation",
"needed_bands": ["PV_PC_50"],
"color_ramp": [
{
'value': 0,
'color': '#ffffcc'
},
{
'value': 25,
'color': '#c2e699'
},
{
'value': 50,
'color': '#78c679'
},
{
'value': 75,
'color': '#31a354'
},
{
'value': 100,
'color': '#006837'
}
],
"pq_masks": [
{
"flags": {
'sea': True,
},
"invert": True,
},
],
},
{
"name": "green_veg_90",
"title": "90th Percentile",
"abstract": "90th Percentile of Green Vegetation",
"needed_bands": ["PV_PC_90"],
"color_ramp": [
{
'value': 0,
'color': '#ffffcc'
},
{
'value': 25,
'color': '#c2e699'
},
{
'value': 50,
'color': '#78c679'
},
{
'value': 75,
'color': '#31a354'
},
{
'value': 100,
'color': '#006837'
}
],
"pq_masks": [
{
"flags": {
'sea': True,
},
"invert": True,
},
],
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "green_veg_10",
},
]
},
{
"name": "fcp_non_green_veg",
"title": "Fractional Cover Percentiles - Non Green Vegetation",
"abstract": "",
"products": [
{
"label": "Fractional Cover Percentiles - Non Green Vegetation",
"abstract": """
Fractional Cover Percentiles version 2.2.0, 25 metre, 100km tile, Australian Albers Equal Area projection (EPSG:3577). Data is only visible at higher resolutions; when zoomed-out the available area will be displayed as a shaded region.
Fractional cover provides information about the proportions of green vegetation, non-green vegetation (including deciduous trees during autumn, dry grass, etc.), and bare areas for every 25m x 25m ground footprint. Fractional cover provides insight into how areas of dry vegetation and/or bare soil and green vegetation are changing over time. The percentile summaries are designed to make it easier to analyse and interpret fractional cover. Percentiles provide an indicator of where an observation sits, relative to the rest of the observations for the pixel. For example, the 90th percentile is the value below which 90% of the observations fall. The fractional cover algorithm was developed by the Joint Remote Sensing Research Program. For more information, please see data.auscover.org.au/xwiki/bin/view/Product+pages/Landsat+Fractional+Cover
This contains the percentage of non-green vegetation per pixel at the 10th, 50th (median) and 90th percentiles for observations acquired in each full calendar year (1st of January - 31st December) from 1987 to the most recent full calendar year.
Fractional Cover products use Water Observations from Space (WOfS) to mask out areas of water, cloud and other phenomena. To be considered in the FCP product a pixel must have had at least 10 clear observations over the year.
For service status information, see https://status.dea.ga.gov.au""",
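# A hedged sketch of the per-pixel percentile idea described in the abstract
# above; numpy here is only for illustration and is not part of this config:
#
#     import numpy as np
#     clear_obs = np.array([12, 30, 55, 61, 78])   # NPV% observations for one pixel
#     pc_10, pc_50, pc_90 = np.percentile(clear_obs, [10, 50, 90])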
"type": "100km tile",
"variant": "25m",
"name": "fcp_non_green_veg",
"product_name": "fc_percentile_albers_annual",
"pq_dataset": "geodata_coast_100k",
"pq_band": "land",
"pq_ignore_time": True,
"min_zoom_factor": 15.0,
"zoomed_out_fill_colour": [150, 180, 200, 160],
"time_zone": 9,
"extent_mask_func": lambda data, band: data[band] != data[band].nodata,
"ignore_info_flags": [],
"data_manual_merge": False,
"always_fetch_bands": [],
"apply_solar_corrections": False,
"legend": {
"styles": ["non_green_veg_10"]
},
"wcs_default_bands": ["NPV_PC_10", "NPV_PC_50", "NPV_PC_90"],
"styles": [
{
"name": "non_green_veg_10",
"title": "10th Percentile",
"abstract": "10th Percentile of Non Green Vegetation",
"needed_bands": ["NPV_PC_10"],
"color_ramp": [
{
'value': 0,
'color': '#ffffd4',
'legend': {}
},
{
'value': 25,
'color': '#fed98e',
'legend': {}
},
{
'value': 50,
'color': '#fe9929',
'legend': {}
},
{
'value': 75,
'color': '#d95f0e',
'legend': {}
},
{
'value': 100,
'color': '#993404',
'legend': {}
}
],
"pq_masks": [
{
"flags": {
'sea': True,
},
"invert": True,
},
],
"legend": {
"units": "% / pixel",
"title": "Percentage of Pixel that is Non-Green Vegetation",
"rcParams": {
"font.size": 9
}
}
},
{
"name": "non_green_veg_50",
"title": "50th Percentile",
"abstract": "50th Percentile of Non Green Vegetation",
"needed_bands": ["NPV_PC_50"],
"color_ramp": [
{
'value': 0,
'color': '#ffffd4'
},
{
'value': 25,
'color': '#fed98e'
},
{
'value': 50,
'color': '#fe9929'
},
{
'value': 75,
'color': '#d95f0e'
},
{
'value': 100,
'color': '#993404'
}
],
"pq_masks": [
{
"flags": {
'sea': True,
},
"invert": True,
},
],
},
{
"name": "non_green_veg_90",
"title": "90th Percentile",
"abstract": "90th Percentile of Non Green Vegetation",
"needed_bands": ["NPV_PC_90"],
"color_ramp": [
{
'value': 0,
'color': '#ffffd4'
},
{
'value': 25,
'color': '#fed98e'
},
{
'value': 50,
'color': '#fe9929'
},
{
'value': 75,
'color': '#d95f0e'
},
{
'value': 100,
'color': '#993404'
}
],
"pq_masks": [
{
"flags": {
'sea': True,
},
"invert": True,
},
],
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "non_green_veg_10",
},
]
},
{
"name": "fcp_bare_soil",
"title": "Fractional Cover Percentiles - Bare Soil",
"abstract": "",
"products": [
{
"label": "Fractional Cover Percentiles - Bare Soil",
"abstract": """
Fractional Cover Percentiles version 2.2.0, 25 metre, 100km tile, Australian Albers Equal Area projection (EPSG:3577). Data is only visible at higher resolutions; when zoomed-out the available area will be displayed as a shaded region.
Fractional cover provides information about the proportions of green vegetation, non-green vegetation (including deciduous trees during autumn, dry grass, etc.), and bare areas for every 25m x 25m ground footprint. Fractional cover provides insight into how areas of dry vegetation and/or bare soil and green vegetation are changing over time. The percentile summaries are designed to make it easier to analyse and interpret fractional cover. Percentiles provide an indicator of where an observation sits, relative to the rest of the observations for the pixel. For example, the 90th percentile is the value below which 90% of the observations fall. The fractional cover algorithm was developed by the Joint Remote Sensing Research Program. For more information, please see data.auscover.org.au/xwiki/bin/view/Product+pages/Landsat+Fractional+Cover
This contains the percentage of bare soil per pixel at the 10th, 50th (median) and 90th percentiles for observations acquired in each full calendar year (1st of January - 31st December) from 1987 to the most recent full calendar year.
Fractional Cover products use Water Observations from Space (WOfS) to mask out areas of water, cloud and other phenomena. To be considered in the FCP product a pixel must have had at least 10 clear observations over the year.
For service status information, see https://status.dea.ga.gov.au""",
"type": "100km tile",
"variant": "25m",
"name": "fcp_bare_ground",
"product_name": "fc_percentile_albers_annual",
"pq_dataset": "geodata_coast_100k",
"pq_band": "land",
"pq_ignore_time": True,
"min_zoom_factor": 15.0,
"zoomed_out_fill_colour": [150, 180, 200, 160],
"time_zone": 9,
"extent_mask_func": lambda data, band: data[band] != data[band].nodata,
"ignore_info_flags": [],
"data_manual_merge": False,
"always_fetch_bands": [],
"apply_solar_corrections": False,
"legend": {
"styles": ["bare_ground_10"]
},
"wcs_default_bands": ["BS_PC_10", "BS_PC_50", "BS_PC_90"],
"styles": [
{
"name": "bare_ground_10",
"title": "10th Percentile",
"abstract": "10th Percentile of Bare Soil",
"needed_bands": ["BS_PC_10"],
"color_ramp": [
{
'value': 0,
'color': '#feebe2',
'legend': {}
},
{
'value': 25,
'color': '#fbb4b9',
'legend': {}
},
{
'value': 50,
'color': '#f768a1',
'legend': {}
},
{
'value': 75,
'color': '#c51b8a',
'legend': {}
},
{
'value': 100,
'color': '#7a0177',
'legend': {}
}
],
"pq_masks": [
{
"flags": {
'sea': True,
},
"invert": True,
},
],
"legend": {
"units": "% / pixel",
"title": "Percentage of Pixel that is Bare Soil",
"rcParams": {
"font.size": 9
}
}
},
{
"name": "bare_ground_50",
"title": "50th Percentile",
"abstract": "50th Percentile of Bare Soil",
"needed_bands": ["BS_PC_50"],
"color_ramp": [
{
'value': 0,
'color': '#feebe2'
},
{
'value': 25,
'color': '#fbb4b9'
},
{
'value': 50,
'color': '#f768a1'
},
{
'value': 75,
'color': '#c51b8a'
},
{
'value': 100,
'color': '#7a0177'
}
],
"pq_masks": [
{
"flags": {
'sea': True,
},
"invert": True,
},
],
},
{
"name": "bare_ground_90",
"title": "90th Percentile",
"abstract": "90th Percentile of Bare Soil",
"needed_bands": ["BS_PC_90"],
"color_ramp": [
{
'value': 0,
'color': '#feebe2'
},
{
'value': 25,
'color': '#fbb4b9'
},
{
'value': 50,
'color': '#f768a1'
},
{
'value': 75,
'color': '#c51b8a'
},
{
'value': 100,
'color': '#7a0177'
}
],
"pq_masks": [
{
"flags": {
'sea': True,
},
"invert": True,
},
],
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
# Source repository: nannancy/graph-generation
# an implementation for "Learning Deep Generative Models of Graphs"
from main import *
class Args_DGMG():
def __init__(self):
### CUDA
self.cuda = 2
### model type
self.note = 'Baseline_DGMG' # do GCN after adding each edge
# self.note = 'Baseline_DGMG_fast' # do GCN only after adding each node
### data config
self.graph_type = 'caveman_small'
# self.graph_type = 'grid_small'
# self.graph_type = 'ladder_small'
# self.graph_type = 'enzymes_small'
# self.graph_type = 'barabasi_small'
# self.graph_type = 'citeseer_small'
self.max_num_node = 20
### network config
self.node_embedding_size = 64
self.test_graph_num = 200
### training config
self.epochs = 2000 # now one epoch means self.batch_ratio x batch_size
self.load_epoch = 2000
self.epochs_test_start = 100
self.epochs_test = 100
self.epochs_log = 100
self.epochs_save = 100
if 'fast' in self.note:
self.is_fast = True
else:
self.is_fast = False
self.lr = 0.001
self.milestones = [300, 600, 1000]
self.lr_rate = 0.3
### output config
self.model_save_path = 'model_save/'
self.graph_save_path = 'graphs/'
self.figure_save_path = 'figures/'
self.timing_save_path = 'timing/'
self.figure_prediction_save_path = 'figures_prediction/'
self.nll_save_path = 'nll/'
self.fname = self.note + '_' + self.graph_type + '_' + str(self.node_embedding_size)
self.fname_pred = self.note + '_' + self.graph_type + '_' + str(self.node_embedding_size) + '_pred_'
self.fname_train = self.note + '_' + self.graph_type + '_' + str(self.node_embedding_size) + '_train_'
self.fname_test = self.note + '_' + self.graph_type + '_' + str(self.node_embedding_size) + '_test_'
self.load = False
self.save = True
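# A hedged end-to-end sketch. The dataset helper and model class come from
# main.py / model.py via the wildcard import at the top of this file; the names
# create_graphs.create and DGM_graphs are assumptions and may differ there.
#
#     args = Args_DGMG()
#     graphs = create_graphs.create(args)                       # training graphs
#     model = DGM_graphs(h_size=args.node_embedding_size).cuda()
#     train_DGMG(args, graphs, model)                           # defined below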
def train_DGMG_epoch(epoch, args, model, dataset, optimizer, scheduler, is_fast = False):
model.train()
graph_num = len(dataset)
order = list(range(graph_num))
shuffle(order)
loss_addnode = 0
loss_addedge = 0
loss_node = 0
for i in order:
model.zero_grad()
graph = dataset[i]
# do random ordering: relabel nodes
node_order = list(range(graph.number_of_nodes()))
shuffle(node_order)
order_mapping = dict(zip(graph.nodes(), node_order))
graph = nx.relabel_nodes(graph, order_mapping, copy=True)
# NOTE: when starting loop, we assume a node has already been generated
node_count = 1
node_embedding = [Variable(torch.ones(1,args.node_embedding_size)).cuda()] # list of torch tensors, each size: 1*hidden
loss = 0
while node_count<=graph.number_of_nodes():
node_neighbor = graph.subgraph(list(range(node_count))).adjacency_list() # list of lists (first node is zero)
node_neighbor_new = graph.subgraph(list(range(node_count+1))).adjacency_list()[-1] # list of new node's neighbors
# 1 message passing
# do 2 times message passing
node_embedding = message_passing(node_neighbor, node_embedding, model)
# 2 graph embedding and new node embedding
node_embedding_cat = torch.cat(node_embedding, dim=0)
graph_embedding = calc_graph_embedding(node_embedding_cat, model)
init_embedding = calc_init_embedding(node_embedding_cat, model)
# 3 f_addnode
p_addnode = model.f_an(graph_embedding)
if node_count < graph.number_of_nodes():
# add node
node_neighbor.append([])
node_embedding.append(init_embedding)
if is_fast:
node_embedding_cat = torch.cat(node_embedding, dim=0)
# calc loss
loss_addnode_step = F.binary_cross_entropy(p_addnode,Variable(torch.ones((1,1))).cuda())
# loss_addnode_step.backward(retain_graph=True)
loss += loss_addnode_step
loss_addnode += loss_addnode_step.data
else:
# calc loss
loss_addnode_step = F.binary_cross_entropy(p_addnode, Variable(torch.zeros((1, 1))).cuda())
# loss_addnode_step.backward(retain_graph=True)
loss += loss_addnode_step
loss_addnode += loss_addnode_step.data
break
edge_count = 0
while edge_count<=len(node_neighbor_new):
if not is_fast:
node_embedding = message_passing(node_neighbor, node_embedding, model)
node_embedding_cat = torch.cat(node_embedding, dim=0)
graph_embedding = calc_graph_embedding(node_embedding_cat, model)
# 4 f_addedge
p_addedge = model.f_ae(graph_embedding)
if edge_count < len(node_neighbor_new):
# calc loss
loss_addedge_step = F.binary_cross_entropy(p_addedge, Variable(torch.ones((1, 1))).cuda())
# loss_addedge_step.backward(retain_graph=True)
loss += loss_addedge_step
loss_addedge += loss_addedge_step.data
# 5 f_nodes
# excluding the last node (which is the new node)
node_new_embedding_cat = node_embedding_cat[-1,:].expand(node_embedding_cat.size(0)-1,node_embedding_cat.size(1))
s_node = model.f_s(torch.cat((node_embedding_cat[0:-1,:],node_new_embedding_cat),dim=1))
p_node = F.softmax(s_node.permute(1,0))
# get ground truth
a_node = torch.zeros((1,p_node.size(1)))
# print('node_neighbor_new',node_neighbor_new, edge_count)
a_node[0,node_neighbor_new[edge_count]] = 1
a_node = Variable(a_node).cuda()
# add edge
node_neighbor[-1].append(node_neighbor_new[edge_count])
node_neighbor[node_neighbor_new[edge_count]].append(len(node_neighbor)-1)
# calc loss
loss_node_step = F.binary_cross_entropy(p_node,a_node)
# loss_node_step.backward(retain_graph=True)
loss += loss_node_step
loss_node += loss_node_step.data
else:
# calc loss
loss_addedge_step = F.binary_cross_entropy(p_addedge, Variable(torch.zeros((1, 1))).cuda())
# loss_addedge_step.backward(retain_graph=True)
loss += loss_addedge_step
loss_addedge += loss_addedge_step.data
break
edge_count += 1
node_count += 1
# update deterministic and lstm
loss.backward()
optimizer.step()
scheduler.step()
loss_all = loss_addnode + loss_addedge + loss_node
if epoch % args.epochs_log==0:
print('Epoch: {}/{}, train loss: {:.6f}, graph type: {}, hidden: {}'.format(
epoch, args.epochs,loss_all[0], args.graph_type, args.node_embedding_size))
# loss_sum += loss.data*x.size(0)
# return loss_sum
def train_DGMG_forward_epoch(args, model, dataset, is_fast = False):
model.train()
graph_num = len(dataset)
order = list(range(graph_num))
shuffle(order)
loss_addnode = 0
loss_addedge = 0
loss_node = 0
for i in order:
model.zero_grad()
graph = dataset[i]
# do random ordering: relabel nodes
node_order = list(range(graph.number_of_nodes()))
shuffle(node_order)
order_mapping = dict(zip(graph.nodes(), node_order))
graph = nx.relabel_nodes(graph, order_mapping, copy=True)
# NOTE: when starting loop, we assume a node has already been generated
node_count = 1
node_embedding = [Variable(torch.ones(1,args.node_embedding_size)).cuda()] # list of torch tensors, each size: 1*hidden
loss = 0
while node_count<=graph.number_of_nodes():
node_neighbor = graph.subgraph(list(range(node_count))).adjacency_list() # list of lists (first node is zero)
node_neighbor_new = graph.subgraph(list(range(node_count+1))).adjacency_list()[-1] # list of new node's neighbors
# 1 message passing
# do 2 times message passing
node_embedding = message_passing(node_neighbor, node_embedding, model)
# 2 graph embedding and new node embedding
node_embedding_cat = torch.cat(node_embedding, dim=0)
graph_embedding = calc_graph_embedding(node_embedding_cat, model)
init_embedding = calc_init_embedding(node_embedding_cat, model)
# 3 f_addnode
p_addnode = model.f_an(graph_embedding)
if node_count < graph.number_of_nodes():
# add node
node_neighbor.append([])
node_embedding.append(init_embedding)
if is_fast:
node_embedding_cat = torch.cat(node_embedding, dim=0)
# calc loss
loss_addnode_step = F.binary_cross_entropy(p_addnode,Variable(torch.ones((1,1))).cuda())
# loss_addnode_step.backward(retain_graph=True)
loss += loss_addnode_step
loss_addnode += loss_addnode_step.data
else:
# calc loss
loss_addnode_step = F.binary_cross_entropy(p_addnode, Variable(torch.zeros((1, 1))).cuda())
# loss_addnode_step.backward(retain_graph=True)
loss += loss_addnode_step
loss_addnode += loss_addnode_step.data
break
edge_count = 0
while edge_count<=len(node_neighbor_new):
if not is_fast:
node_embedding = message_passing(node_neighbor, node_embedding, model)
node_embedding_cat = torch.cat(node_embedding, dim=0)
graph_embedding = calc_graph_embedding(node_embedding_cat, model)
# 4 f_addedge
p_addedge = model.f_ae(graph_embedding)
if edge_count < len(node_neighbor_new):
# calc loss
loss_addedge_step = F.binary_cross_entropy(p_addedge, Variable(torch.ones((1, 1))).cuda())
# loss_addedge_step.backward(retain_graph=True)
loss += loss_addedge_step
loss_addedge += loss_addedge_step.data
# 5 f_nodes
# excluding the last node (which is the new node)
node_new_embedding_cat = node_embedding_cat[-1,:].expand(node_embedding_cat.size(0)-1,node_embedding_cat.size(1))
s_node = model.f_s(torch.cat((node_embedding_cat[0:-1,:],node_new_embedding_cat),dim=1))
p_node = F.softmax(s_node.permute(1,0))
# get ground truth
a_node = torch.zeros((1,p_node.size(1)))
# print('node_neighbor_new',node_neighbor_new, edge_count)
a_node[0,node_neighbor_new[edge_count]] = 1
a_node = Variable(a_node).cuda()
# add edge
node_neighbor[-1].append(node_neighbor_new[edge_count])
node_neighbor[node_neighbor_new[edge_count]].append(len(node_neighbor)-1)
# calc loss
loss_node_step = F.binary_cross_entropy(p_node,a_node)
# loss_node_step.backward(retain_graph=True)
loss += loss_node_step
loss_node += loss_node_step.data*p_node.size(1)
else:
# calc loss
loss_addedge_step = F.binary_cross_entropy(p_addedge, Variable(torch.zeros((1, 1))).cuda())
# loss_addedge_step.backward(retain_graph=True)
loss += loss_addedge_step
loss_addedge += loss_addedge_step.data
break
edge_count += 1
node_count += 1
loss_all = loss_addnode + loss_addedge + loss_node
# if epoch % args.epochs_log==0:
# print('Epoch: {}/{}, train loss: {:.6f}, graph type: {}, hidden: {}'.format(
# epoch, args.epochs,loss_all[0], args.graph_type, args.node_embedding_size))
return loss_all[0]/len(dataset)
def test_DGMG_epoch(args, model, is_fast=False):
model.eval()
graph_num = args.test_graph_num
graphs_generated = []
for i in range(graph_num):
# NOTE: when starting loop, we assume a node has already been generated
node_neighbor = [[]] # list of lists (first node is zero)
node_embedding = [Variable(torch.ones(1,args.node_embedding_size)).cuda()] # list of torch tensors, each size: 1*hidden
node_count = 1
while node_count<=args.max_num_node:
# 1 message passing
# do 2 times message passing
node_embedding = message_passing(node_neighbor, node_embedding, model)
# 2 graph embedding and new node embedding
node_embedding_cat = torch.cat(node_embedding, dim=0)
graph_embedding = calc_graph_embedding(node_embedding_cat, model)
init_embedding = calc_init_embedding(node_embedding_cat, model)
# 3 f_addnode
p_addnode = model.f_an(graph_embedding)
a_addnode = sample_tensor(p_addnode)
# print(a_addnode.data[0][0])
if a_addnode.data[0][0]==1:
# print('add node')
# add node
node_neighbor.append([])
node_embedding.append(init_embedding)
if is_fast:
node_embedding_cat = torch.cat(node_embedding, dim=0)
else:
break
edge_count = 0
while edge_count<args.max_num_node:
if not is_fast:
node_embedding = message_passing(node_neighbor, node_embedding, model)
node_embedding_cat = torch.cat(node_embedding, dim=0)
graph_embedding = calc_graph_embedding(node_embedding_cat, model)
# 4 f_addedge
p_addedge = model.f_ae(graph_embedding)
a_addedge = sample_tensor(p_addedge)
# print(a_addedge.data[0][0])
if a_addedge.data[0][0]==1:
# print('add edge')
# 5 f_nodes
# excluding the last node (which is the new node)
node_new_embedding_cat = node_embedding_cat[-1,:].expand(node_embedding_cat.size(0)-1,node_embedding_cat.size(1))
s_node = model.f_s(torch.cat((node_embedding_cat[0:-1,:],node_new_embedding_cat),dim=1))
p_node = F.softmax(s_node.permute(1,0))
a_node = gumbel_softmax(p_node, temperature=0.01)
_, a_node_id = a_node.topk(1)
a_node_id = int(a_node_id.data[0][0])
# add edge
node_neighbor[-1].append(a_node_id)
node_neighbor[a_node_id].append(len(node_neighbor)-1)
else:
break
edge_count += 1
node_count += 1
# save graph
node_neighbor_dict = dict(zip(list(range(len(node_neighbor))), node_neighbor))
graph = nx.from_dict_of_lists(node_neighbor_dict)
graphs_generated.append(graph)
return graphs_generated
########### train function for DGMG
def train_DGMG(args, dataset_train, model):
# check if load existing model
if args.load:
fname = args.model_save_path + args.fname + 'model_' + str(args.load_epoch) + '.dat'
model.load_state_dict(torch.load(fname))
args.lr = 0.00001
epoch = args.load_epoch
print('model loaded!, lr: {}'.format(args.lr))
else:
epoch = 1
# initialize optimizer
optimizer = optim.Adam(list(model.parameters()), lr=args.lr)
scheduler = MultiStepLR(optimizer, milestones=args.milestones, gamma=args.lr_rate)
# start main loop
time_all = np.zeros(args.epochs)
while epoch <= args.epochs:
time_start = tm.time()
# train
train_DGMG_epoch(epoch, args, model, dataset_train, optimizer, scheduler, is_fast=args.is_fast)
time_end = tm.time()
time_all[epoch - 1] = time_end - time_start
# print('time used',time_all[epoch - 1])
# test
if epoch % args.epochs_test == 0 and epoch >= args.epochs_test_start:
graphs = test_DGMG_epoch(args,model, is_fast=args.is_fast)
fname = args.graph_save_path + args.fname_pred + str(epoch) + '.dat'
save_graph_list(graphs, fname)
# print('test done, graphs saved')
# save model checkpoint
if args.save:
if epoch % args.epochs_save == 0:
fname = args.model_save_path + args.fname + 'model_' + str(epoch) + '.dat'
torch.save(model.state_dict(), fname)
epoch += 1
from datetime import datetime, timedelta
from pyiso.base import BaseClient
import copy
import re
from bs4 import BeautifulSoup
import time
import pdb
# PRC_LMP
# PRC_HASP_LMP
# PRC_RTPD_LMP
# PRC_INTVL_LMP
# PRC_AS - All Ancillary Services for Region and Sub-Regional Partition. Posted hourly in $/MW for the DAM and HASP.
# PRC_INVL_AS - Posts 15-Minute price relevant to the next 15 minute binding interval for RTM
# PRC_CURR_LMP - Posts all LMP data for the most current interval
"""
Returned data is a list of dicts, each of which uses a timestamp as its main indexing key. For example, the following is the result of this code:
mycaiso = caiso.CAISOClient()
mydata = mycaiso.get_generation(latest=True)
mydata
[{'timestamp': datetime.datetime(2014, 12, 11, 6, 20, tzinfo=<UTC>), 'gen_MW': 1678.0, 'fuel_name': 'renewable', 'ba_name': 'CAISO', 'freq': '10m', 'market': 'RT5M'},
{'timestamp': datetime.datetime(2014, 12, 11, 6, 20, tzinfo=<UTC>), 'gen_MW': 447.0, 'fuel_name': 'wind', 'ba_name': 'CAISO', 'freq': '10m', 'market': 'RT5M'},
{'gen_MW': 26155.37, 'ba_name': 'CAISO', 'timestamp': datetime.datetime(2014, 12, 11, 6, 20, tzinfo=<UTC>), 'freq': '10m', 'fuel_name': 'other', 'market': 'RT5M'}]
this can then be pulled into a pandas dataframe:
import pandas as pd
df = pd.DataFrame(data)
"""
"""
fruitful methods:
get_generation(self, latest=False, yesterday=False,start_at=False, end_at=False, **kwargs):
get_load(self, latest=False,start_at=False, end_at=False, **kwargs)
get_trade(self, latest=False, start_at=False, end_at=False, **kwargs)
get_lmp(self, latest=False,start_at=False, end_at=False, market='hourly', grp_type='ALL',node='ALL',**kwargs)
construct_oasis_payload(self, queryname, preferred_start_at=None, **kwargs)
fetch_oasis(self, payload={})
parsing methods:
parse_generation
parse_lmp(self,raw_data)
parse_oasis_slrs(self, raw_data)
parse_oasis_renewable(self, raw_data)
parse_oasis_demand_forecast(self, raw_data)
parse_todays_outlook_renewables(self, soup, ts)
"""
class CAISOClient(BaseClient):
"""
Interface to CAISO data sources.
For information about the data sources,
see http://www.caiso.com/Documents/InterfaceSpecifications-OASISv4_1_3.pdf
"""
NAME = 'CAISO'
base_url_oasis = 'http://oasis.caiso.com/oasisapi/SingleZip'
base_url_gen = 'http://content.caiso.com/green/renewrpt/'
base_url_outlook = 'http://content.caiso.com/outlook/SP/'
base_payload = {'version': 1}
oasis_request_time_format = '%Y%m%dT%H:%M-0000'
TZ_NAME = 'America/Los_Angeles'
fuels = {
'GEOTHERMAL': 'geo',
'BIOMASS': 'biomass',
'BIOGAS': 'biogas',
'SMALL HYDRO': 'smhydro',
'WIND TOTAL': 'wind',
'SOLAR': 'solar',
'SOLAR PV': 'solarpv',
'SOLAR THERMAL': 'solarth',
'NUCLEAR': 'nuclear',
'THERMAL': 'thermal',
'HYDRO': 'hydro',
}
oasis_markets = { # {'RT5M': 'RTM', 'DAHR': 'DAM', 'RTHR': 'HASP'}
BaseClient.MARKET_CHOICES.hourly: 'HASP',
BaseClient.MARKET_CHOICES.fivemin: 'RTM', # There are actually three codes used: RTPD (Real-time Pre-dispatch), RTD (real-time dispatch), and RTM (Real-Time Market). I can't figure out what the difference is.
BaseClient.MARKET_CHOICES.dam: 'DAM',
}
def get_generation(self, latest=False, yesterday=False,
start_at=False, end_at=False, **kwargs):
# set args
self.handle_options(data='gen', latest=latest, yesterday=yesterday,
start_at=start_at, end_at=end_at, **kwargs)
# ensure market and freq are set
if 'market' not in self.options:
if self.options['forecast']:
self.options['market'] = self.MARKET_CHOICES.dam
else:
self.options['market'] = self.MARKET_CHOICES.fivemin
if 'freq' not in self.options:
if self.options['forecast']:
self.options['freq'] = self.FREQUENCY_CHOICES.hourly
else:
self.options['freq'] = self.FREQUENCY_CHOICES.fivemin
if latest:
return self._generation_latest()
elif self.options['forecast']:
return self._generation_forecast()
else:
return self._generation_historical()
def get_lmp(self, latest=False,
start_at=False, end_at=False, market='hourly', grp_type='ALL',node='ALL',csv=False, **kwargs):
# Construct_oasis_payload expects market option to be one of 'hourly', 'fivemin', 'tenmin', 'na', 'dam'
# if csv = False, pulls xml files, parses SLOWLY, and returned data is
# a list of dicts, each of which has a main index of the timestamp
# if csv=True, pulls csv files, parses more quickly, and returns Pandas
# Panel data structure
# Expected parameters:
# node: CAISO node ID. Can be set to individual node or "ALL". "ALL" will override grp_type
# grp_type: either "ALL_APNodes" or "ALL" - This will trigger day-by-day iteration
# NOTE: This needs to be turned off for processing individual nodes. This will override node types
# market= "DAM", "HASP", "RTM"
# start_at and end_at can be a variety of parsable input types, with or without time codes
# i.e. '2013-10-12T11:45:30' or '2011-10-12'
# Relevant XML Calls:
# PRC_LMP - for market_run_id='DAM'
# PRC_HASP_LMP for market_run_id='HASP'
# PRC_INTVL_LMP for market_run_id='RTM'
# PRC_RTPD_LMP No longer valid?
# Max call interval:
# In the HASP and RTM markets, requesting more than the max interval length may result in the wrong data being returned.
# Individual nodes: <31 days
# Calling "ALL" or "ALL_APNODES":
# DAM: 1 day, returns 4 files from expanded zip. Each has 20-line header
# HASP: 1 hour, returns one file with all components (LMP, MCC, MCE, MCL)
# RTM: 1 hour, returns one file with all components (LMP, MCC, MCE, MCL)
#PRC_LMP
# if grp_type=="ALL" or "ALL_APNODES", we are processing full node sets:
# remove 'node' from the payload
# can only process one time step at a time,
# Time step for DAM = 1 day; time step otherwise = 1 hr
#
# if node is not "ALL", we are dealing with a specific node:
# remove grp_type from payload
# Check to make sure that the date is less than 31 days or cut into pieces
# set args
self.handle_options(data='load', latest=latest,
start_at=start_at, end_at=end_at, market=market, grp_type=grp_type,node=node, **kwargs)
requestSpan = self.options['end_at'] - self.options['start_at'] # This is the duration spanned by our request
requestStart = self.options['start_at'] #This should be a datetime object
requestEnd = self.options['end_at'] # This should be a datetime object
print 'Request span is:',requestSpan
# ensure market and freq are set # What is this for?
if 'market' not in self.options:
if self.options['forecast']:
self.options['market'] = self.MARKET_CHOICES.dam
else:
self.options['market'] = self.MARKET_CHOICES.fivemin
"""if 'freq' not in self.options: # What is the frequency used for?
if self.options['forecast']:
self.options['freq'] = self.FREQUENCY_CHOICES.hourly
else:
self.options['freq'] = self.FREQUENCY_CHOICES.fivemin
"""
# Clean up conflicting commands
# Check this: this may currently be buggy when requesting grp_type=ALL_APNODES but excluding 'node' in the call
if self.options['node']=='ALL' and self.options['grp_type']!='ALL':
del self.options['grp_type'] # Node typically overrides the grp_type call
# Decision fork: either we are handing "all" nodes or we are handling an individual node
if self.options['grp_type']=='ALL' or self.options['grp_type']=='ALL_APNodes':
# If we are processing full node sets, need to iterate across the appropriate time blocks
del self.options['node'] # Get rid of node commands to ensure we aren't sending mixed signals. This will override the node option.
if market=='DAHR':
print ('The DAM LMP call is not yet implemented... you should go do that.')
else: # We are not in DAM, but in HASP or RTM
# If we are requesting all nodes in the Hour-ahead market or real-time markets, we can request at most one hour at a time
if market=='RTHR':
# This is a request for the Hour-Ahead Scheduling Process (HASP)
oasis_API_call= 'PRC_HASP_LMP'
else: # market == 'RTM'
# Assume that this is a request for the real-time market
oasis_API_call= 'PRC_INTVL_LMP'
parsed_data = [] # Placeholder
currentStartAt = requestStart # Initialize loop conditions
currentEndAt = currentStartAt
# The contents of the following if statement can probably be refactored
if requestSpan.total_seconds()>3600:
timeStep = timedelta(hours=1) # Increment by one hour each iteration
currentEndAt = currentEndAt + timeStep # Priming the pump
# The following loop can probably be refactored significantly
while currentEndAt < requestEnd:
# Set up payload, fetch data, and parse data
self.options['start_at']=currentStartAt
self.options['end_at']=currentEndAt
payload = self.construct_oasis_payload(oasis_API_call,csv=csv)
print 'Requesting data for time starting at ', (currentStartAt).strftime(self.oasis_request_time_format)
startRequest = time.clock()
oasis_data = self.fetch_oasis(payload=payload)
endRequest = time.clock()
print 'Imported data in ',endRequest-startRequest,' s'
parsed_data.append(self.parse_lmp(oasis_data,csv=csv))
print 'Parsed Data in ', time.clock()-endRequest,' s'
currentStartAt = currentEndAt
currentEndAt = currentEndAt + timeStep
# Previous 'if' block was to get us within one time step of the finish. This will get us the rest of the way.
#Clean up final iteration to get to the end time
print 'Exited the loop'
self.options['start_at']=currentStartAt
self.options['end_at']=requestEnd
payload = self.construct_oasis_payload(oasis_API_call,csv=csv)
print 'Requesting data for time starting at ', (currentStartAt).strftime(self.oasis_request_time_format)
oasis_data = self.fetch_oasis(payload=payload)
parsed_data.append(self.parse_lmp(oasis_data,csv))
result = parsed_data
#merge dataframes if you have been pulling csv's
if csv:
for i in range(len(parsed_data)):
if i == 0: result = parsed_data[0]
else:
result = result.append(parsed_data[i])
result = result.unstack()
result.columns = result.columns.droplevel()
else:
# If we aren't handling full node sets, we are handling individual nodes and can request up to 31 days of data at a time
print('The single-node calls are not yet implemented... you should go do that.')
# Return either just the most recent datapoint, or return all the parsed data
# It seems like this could be moved to a separate function
# Commenting out for now because it looks like it needs a specific data structure, i.e. a dict with a 'timestamp' key
"""
if self.options['latest']: | |
else:
# If item is already inside another selected item,
# remove that selection
super_selection = self._selected_super_item(item)
if super_selection:
self._remove_selection(super_selection)
# Remove selections this selection will override.
sub_selections = self._selected_sub_items(item)
for sub in sub_selections:
self._remove_selection(sub)
if state:
self._add_selection(item)
elif item in self._selection:
self._remove_selection(item)
self._re_enumerate_selections()
self.selectionChanged.emit()
def _add_selection(self, item):
"""Add selection rooted at item
"""
outline = self._selection_poly(item)
selection_item = QGraphicsPathItem(self)
selection_item.setPos(self.contentsRect().topLeft())
selection_item.setPen(make_pen(width=1, cosmetic=True))
transform = self._transform
path = transform.map(outline)
margin = 4
if item.node.is_leaf:
ppath = QPainterPath()
ppath.addRect(
path.boundingRect().adjusted(-margin, -margin, margin, margin)
)
else:
ppath = QPainterPath()
ppath.addPolygon(path)
ppath = path_outline(ppath, width=margin * 2)
selection_item.setPath(ppath)
selection_item.unscaled_path = outline
self._selection[item] = selection_item
def _remove_selection(self, item):
"""Remove selection rooted at item."""
selection_item = self._selection[item]
selection_item.hide()
selection_item.setParentItem(None)
if self.scene():
self.scene().removeItem(selection_item)
del self._selection[item]
def _selected_sub_items(self, item):
"""Return all selected subclusters under item."""
def branches(item):
return [self._items[ch] for ch in item.node.branches]
res = []
for item in list(preorder(item, branches))[1:]:
if item in self._selection:
res.append(item)
return res
def _selected_super_item(self, item):
"""Return the selected super item if it exists."""
def branches(item):
return [self._items[ch] for ch in item.node.branches]
for selected_item in self._selection:
if item in set(preorder(selected_item, branches)):
return selected_item
return None
def _re_enumerate_selections(self):
"""Re enumerate the selection items and update the colors."""
# Order the clusters
items = sorted(
self._selection.items(), key=lambda item: item[0].node.value.first
)
palette = colorpalette.ColorPaletteGenerator(len(items))
for i, (item, selection_item) in enumerate(items):
# delete and then reinsert to update the ordering
del self._selection[item]
self._selection[item] = selection_item
color = palette[i]
color.setAlpha(150)
selection_item.setBrush(QColor(color))
def _selection_poly(self, item):
# type: (Tree) -> QPolygonF
"""
Return an selection geometry covering item and all its children.
"""
def left(item):
return [self._items[ch] for ch in item.node.branches[:1]]
def right(item):
return [self._items[ch] for ch in item.node.branches[-1:]]
itemsleft = list(preorder(item, left))[::-1]
itemsright = list(preorder(item, right))
# itemsleft + itemsright walks from the leftmost leaf up to the root
# and down to the rightmost leaf
assert itemsleft[0].node.is_leaf
assert itemsright[-1].node.is_leaf
if item.node.is_leaf:
# a single anchor point
vert = [itemsleft[0].element.anchor]
else:
vert = []
for it in itemsleft[1:]:
vert.extend([it.element.path[0], it.element.path[1], it.element.anchor])
for it in itemsright[:-1]:
vert.extend(
[it.element.anchor, it.element.path[-2], it.element.path[-1]]
)
# close the polygon
vert.append(vert[0])
def isclose(a, b, rel_tol=1e-6):
return abs(a - b) < rel_tol * max(abs(a), abs(b))
def isclose_p(p1, p2, rel_tol=1e-6):
return isclose(p1.x, p2.x, rel_tol) and isclose(p1.y, p2.y, rel_tol)
# merge consecutive vertices that are (too) close
acc = [vert[0]]
for v in vert[1:]:
if not isclose_p(v, acc[-1]):
acc.append(v)
vert = acc
return QPolygonF([QPointF(*p) for p in vert])
def _update_selection_items(self):
"""Update the shapes of selection items after a scale change.
"""
transform = self._transform
for item, selection in self._selection.items():
path = transform.map(selection.unscaled_path)
ppath = QPainterPath()
margin = 4
if item.node.is_leaf:
ppath.addRect(
path.boundingRect().adjusted(-margin, -margin, margin, margin)
)
else:
ppath.addPolygon(path)
ppath = path_outline(ppath, width=margin * 2)
selection.setPath(ppath)
def _relayout(self):
if not self._root:
return
self._layout = dendrogram_path(self._root, self.orientation)
for node_geom in postorder(self._layout):
node, geom = node_geom.value
item = self._items[node]
item.element = geom
# the untransformed source path
item.sourcePath = path_toQtPath(geom)
r = item.sourcePath.boundingRect()
base = self._root.value.height
if self.orientation == Left:
r.setRight(base)
elif self.orientation == Right:
r.setLeft(0)
elif self.orientation == Top:
r.setBottom(base)
else:
r.setTop(0)
hitarea = QPainterPath()
hitarea.addRect(r)
item.sourceAreaShape = hitarea
item.setGeometryData(item.sourcePath, item.sourceAreaShape)
item.setZValue(-node.value.height)
def _rescale(self):
if self._root is None:
return
crect = self.contentsRect()
leaf_count = len(list(leaves(self._root)))
if self.orientation in [Left, Right]:
drect = QSizeF(self._root.value.height, leaf_count)
else:
drect = QSizeF(leaf_count, self._root.value.height)
eps = np.finfo(np.float64).eps
if abs(drect.width()) < eps:
sx = 1.0
else:
sx = crect.width() / drect.width()
if abs(drect.height()) < eps:
sy = 1.0
else:
sy = crect.height() / drect.height()
transform = QTransform().scale(sx, sy)
self._transform = transform
self._itemgroup.setPos(crect.topLeft())
self._itemgroup.setGeometry(crect)
for node_geom in postorder(self._layout):
node, _ = node_geom.value
item = self._items[node]
item.setGeometryData(
transform.map(item.sourcePath), transform.map(item.sourceAreaShape)
)
self._selection_items = None
self._update_selection_items()
def sizeHint(self, which, constraint=QSizeF()):
fm = QFontMetrics(self.font())
spacing = fm.lineSpacing()
mleft, mtop, mright, mbottom = self.getContentsMargins()
if self._root and which == Qt.PreferredSize:
nleaves = len([node for node in self._items.keys() if not node.branches])
base = max(10, min(spacing * 16, 250))
if self.orientation in [self.Left, self.Right]:
return QSizeF(base, spacing * nleaves + mleft + mright)
else:
return QSizeF(spacing * nleaves + mtop + mbottom, base)
elif which == Qt.MinimumSize:
return QSizeF(mleft + mright + 10, mtop + mbottom + 10)
else:
return QSizeF()
def sceneEventFilter(self, obj, event):
if isinstance(obj, DendrogramWidget.ClusterGraphicsItem):
if (
event.type() == QEvent.GraphicsSceneHoverEnter
and self.__hoverHighlightEnabled
):
self._set_hover_item(obj)
event.accept()
return True
elif (
event.type() == QEvent.GraphicsSceneMousePress
and event.button() == Qt.LeftButton
):
is_selected = self.is_selected(obj)
is_included = self.is_included(obj)
current_selection = list(self._selection)
if self.__selectionMode == DendrogramWidget.SingleSelection:
if event.modifiers() & Qt.ControlModifier:
self.set_selected_items([obj] if not is_selected else [])
elif event.modifiers() & Qt.AltModifier:
self.set_selected_items([])
elif event.modifiers() & Qt.ShiftModifier:
if not is_included:
self.set_selected_items([obj])
elif current_selection != [obj]:
self.set_selected_items([obj])
elif self.__selectionMode == DendrogramWidget.ExtendedSelection:
if event.modifiers() & Qt.ControlModifier:
self.select_item(obj, not is_selected)
elif event.modifiers() & Qt.AltModifier:
self.select_item(self._selected_super_item(obj), False)
elif event.modifiers() & Qt.ShiftModifier:
if not is_included:
self.select_item(obj, True)
elif current_selection != [obj]:
self.set_selected_items([obj])
if current_selection != self._selection:
self.selectionEdited.emit()
self.itemClicked.emit(obj)
event.accept()
return True
if event.type() == QEvent.GraphicsSceneHoverLeave:
self._set_hover_item(None)
return super().sceneEventFilter(obj, event)
def changeEvent(self, event):
super().changeEvent(event)
if event.type() == QEvent.FontChange:
self.updateGeometry()
# QEvent.ContentsRectChange is missing in PyQt4 <= 4.11.3
if event.type() == 178: # QEvent.ContentsRectChange:
self._rescale()
def resizeEvent(self, event):
super().resizeEvent(event)
self._rescale()
def mousePressEvent(self, event):
QGraphicsWidget.mousePressEvent(self, event)
# A mouse press on an empty widget part
if event.modifiers() == Qt.NoModifier and self._selection:
self.set_selected_clusters([])
class OWHierarchicalClustering(widget.OWWidget):
name = "Hierarchical Clustering"
description = (
"Display a dendrogram of a hierarchical clustering "
"constructed from the input distance matrix."
)
icon = "icons/HierarchicalClustering.svg"
priority = 2100
class Inputs:
distances = Input("Distances", Orange.misc.DistMatrix)
class Outputs:
selected_data = Output("Selected Data", Orange.data.Table, default=True)
annotated_data = Output(ANNOTATED_DATA_SIGNAL_NAME, Orange.data.Table)
settingsHandler = settings.DomainContextHandler()
#: Selected linkage
linkage = settings.Setting(1)
#: Index of the selected annotation item (variable, ...)
annotation = settings.ContextSetting("Enumeration")
#: Out-of-context setting for the case when the "Name" option is available
annotation_if_names = settings.Setting("Name")
#: Out-of-context setting for the case with just "Enumerate" and "None"
annotation_if_enumerate = settings.Setting("Enumerate")
#: Selected tree pruning (none/max depth)
pruning = settings.Setting(0)
#: Maximum depth when max depth pruning is selected
max_depth = settings.Setting(10)
#: Selected cluster selection method (none, cut distance, top n)
selection_method = settings.Setting(0)
#: Cut height ratio wrt root height
cut_ratio = settings.Setting(75.0)
#: Number of top clusters to select
top_n = settings.Setting(3)
#: Dendrogram zoom factor
zoom_factor = settings.Setting(0)
append_clusters = settings.Setting(True)
cluster_role = settings.Setting(2)
cluster_name = settings.Setting("Cluster")
autocommit = settings.Setting(True)
graph_name = "scene"
#: Cluster variable domain role
AttributeRole, ClassRole, MetaRole = 0, 1, 2
cluster_roles = ["Attribute", "Class variable", "Meta variable"]
basic_annotations = ["None", "Enumeration"]
class Error(widget.OWWidget.Error):
not_finite_distances = Msg("Some distances are infinite")
def __init__(self):
super().__init__()
self.matrix = None
self.items = None
self.linkmatrix = None
self.root = None
self._displayed_root = None
self.cutoff_height = 0.0
gui.comboBox(
self.controlArea,
self,
"linkage",
items=LINKAGE,
box="Linkage",
callback=self._invalidate_clustering,
)
model = itemmodels.VariableListModel()
model[:] = self.basic_annotations
self.label_cb = gui.comboBox(
self.controlArea,
self,
"annotation",
box="Annotation",
model=model,
callback=self._update_labels,
contentsLength=12,
)
box = gui.radioButtons(
self.controlArea,
self,
"pruning",
box="Pruning",
callback=self._invalidate_pruning,
)
grid = QGridLayout()
box.layout().addLayout(grid)
grid.addWidget(gui.appendRadioButton(box, "None", addToLayout=False), 0, 0)
self.max_depth_spin = gui.spin(
box,
self,
"max_depth",
minv=1,
maxv=100,
callback=self._invalidate_pruning,
keyboardTracking=False,
)
grid.addWidget(
gui.appendRadioButton(box, "Max depth:", addToLayout=False), 1, 0
)
grid.addWidget(self.max_depth_spin, 1, 1)
self.selection_box = gui.radioButtons(
self.controlArea,
self,
"selection_method",
box="Selection",
callback=self._selection_method_changed,
)
grid = QGridLayout()
self.selection_box.layout().addLayout(grid)
grid.addWidget(
gui.appendRadioButton(self.selection_box, "Manual", addToLayout=False), 0, 0
)
grid.addWidget(
gui.appendRadioButton(
self.selection_box, "Height ratio:", addToLayout=False
),
1,
0,
)
self.cut_ratio_spin = gui.spin(
self.selection_box,
self,
"cut_ratio",
0,
100,
step=1e-1,
spinType=float,
callback=self._selection_method_changed,
)
self.cut_ratio_spin.setSuffix("%")
grid.addWidget(self.cut_ratio_spin, 1, 1)
grid.addWidget(
gui.appendRadioButton(self.selection_box, "Top N:", addToLayout=False), 2, 0
)
self.top_n_spin = gui.spin(
self.selection_box,
self,
"top_n",
1,
20,
callback=self._selection_method_changed,
)
grid.addWidget(self.top_n_spin, 2, 1)
self.zoom_slider = gui.hSlider(
self.controlArea,
self,
"zoom_factor",
box="Zoom",
minValue=-6,
maxValue=3,
step=1,
ticks=True,
createLabel=False,
callback=self.__update_font_scale,
)
zoom_in = QAction(
"Zoom in", self, shortcut=QKeySequence.ZoomIn, triggered=self.__zoom_in
)
zoom_out = QAction(
"Zoom out", self, shortcut=QKeySequence.ZoomOut, triggered=self.__zoom_out
)
zoom_reset = QAction(
"Reset zoom",
self,
shortcut=QKeySequence(Qt.ControlModifier | Qt.Key_0),
triggered=self.__zoom_reset,
)
self.addActions([zoom_in, zoom_out, zoom_reset])
self.controlArea.layout().addStretch()
box = gui.vBox(self.controlArea, "Output")
gui.checkBox(
box,
self,
"append_clusters",
"Append cluster IDs",
callback=self._invalidate_output,
)
ibox = gui.indentedBox(box)
name_edit = gui.lineEdit(ibox, self, "cluster_name")
name_edit.editingFinished.connect(self._invalidate_output)
cb = gui.comboBox(
ibox,
self,
"cluster_role",
callback=self._invalidate_output,
items=self.cluster_roles,
)
form = QFormLayout(
fieldGrowthPolicy=QFormLayout.AllNonFixedFieldsGrow,
labelAlignment=Qt.AlignLeft,
spacing=8,
)
form.addRow("Name:", name_edit)
|